diff --git a/go.mod b/go.mod index 6af28e7c0..eb560f586 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( code.gitea.io/sdk/gitea v0.20.0 github.com/Masterminds/semver/v3 v3.3.1 github.com/argoproj-labs/argocd-operator v0.13.0 - github.com/argoproj/argo-cd/v2 v2.13.3 + github.com/argoproj/argo-cd/v2 v2.14.9 github.com/go-errors/errors v1.5.1 github.com/go-git/go-git/v5 v5.14.0 github.com/go-logr/logr v1.4.2 @@ -31,7 +31,7 @@ require ( ) require ( - cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/compute/metadata v0.5.0 // indirect dario.cat/mergo v1.0.1 // indirect github.com/42wim/httpsig v1.2.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -41,14 +41,16 @@ require ( github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.1.5 // indirect - github.com/argoproj/gitops-engine v0.7.1-0.20240905010810-bd7681ae3f8b // indirect + github.com/argoproj/gitops-engine v0.7.1-0.20250328191959-6d3cf122b03f // indirect github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect - github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect + github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect github.com/bombsimon/logrusr/v2 v2.0.1 // indirect - github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 // indirect + github.com/bradleyfalzon/ghinstallation/v2 v2.12.0 // indirect + github.com/casbin/casbin/v2 v2.102.0 // indirect + github.com/casbin/govaluate v1.2.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.6.0 // indirect @@ -64,7 +66,7 @@ require ( github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/exponent-io/jsonpath 
v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-fed/httpsig v1.1.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect @@ -77,13 +79,13 @@ require ( github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.5.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/go-github/v62 v62.0.0 // indirect + github.com/google/go-github/v66 v66.0.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect @@ -119,11 +121,11 @@ require ( github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pjbgf/sha1cd v0.3.2 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/redis/go-redis/v9 v9.6.1 // indirect + github.com/redis/go-redis/v9 v9.7.1 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/segmentio/backo-go v1.0.0 // indirect @@ -146,18 +148,18 @@ require ( go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect 
go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect golang.org/x/net v0.35.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sync v0.11.0 // indirect golang.org/x/sys v0.30.0 // indirect golang.org/x/term v0.29.0 // indirect golang.org/x/text v0.22.0 // indirect - golang.org/x/time v0.7.0 // indirect + golang.org/x/time v0.8.0 // indirect golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/grpc v1.66.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/grpc v1.68.1 // indirect google.golang.org/protobuf v1.36.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect @@ -178,7 +180,7 @@ require ( sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.17.2 // indirect sigs.k8s.io/kustomize/kyaml v0.17.1 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.4-0.20241211184406-7bf59b3d70ee // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index d7f8965ad..e3e46bed0 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= 
+cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= code.gitea.io/sdk/gitea v0.20.0 h1:Zm/QDwwZK1awoM4AxdjeAQbxolzx2rIP8dDfmKu+KoU= code.gitea.io/sdk/gitea v0.20.0/go.mod h1:faouBHC/zyx5wLgjmRKR62ydyvMzwWf3QnU0bH7Cw6U= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= @@ -36,10 +37,10 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuW github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/argoproj-labs/argocd-operator v0.13.0 h1:awHnBOoGXLaHWalhPNaIBBTbhcWK9CUZPUY8mdPDLyE= github.com/argoproj-labs/argocd-operator v0.13.0/go.mod h1:C+XJqZ/Amd2+HWo8DznSo3pSUNfsC6woSrNRWGHze2g= -github.com/argoproj/argo-cd/v2 v2.13.3 h1:pP0MdX9hUnOd5MDJ6VkspLFctoVCdDtlAv9EQOA94TI= -github.com/argoproj/argo-cd/v2 v2.13.3/go.mod h1:RC23V2744nhZstZVpLCWTQLT2gR0+IXGC3GTBCI6M+I= -github.com/argoproj/gitops-engine v0.7.1-0.20240905010810-bd7681ae3f8b h1:wOPWJ5MBScQO767WpU55oUJDXObfvPL0EfAYWxogbSw= -github.com/argoproj/gitops-engine v0.7.1-0.20240905010810-bd7681ae3f8b/go.mod h1:b1vuwkyMUszyUK+USUJqC8vJijnQsEPNDpC+sDdDLtM= +github.com/argoproj/argo-cd/v2 v2.14.9 h1:a6olDRG9HGvkF1gWJ3plvfdrX/tk7cu3ZbkVS0y/rMU= +github.com/argoproj/argo-cd/v2 v2.14.9/go.mod h1:50mfVqU+TKu2qIUwELpJ3+lmDKEuwEVj/0caQhYqDz8= +github.com/argoproj/gitops-engine v0.7.1-0.20250328191959-6d3cf122b03f h1:T18BJdtZF/HWdkyCqcNI6kQ3SbIomn6g+AZtZtvQUjE= +github.com/argoproj/gitops-engine v0.7.1-0.20250328191959-6d3cf122b03f/go.mod h1:WsnykM8idYRUnneeT31cM/Fq/ZsjkefCbjiD8ioCJkU= github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1 h1:qsHwwOJ21K2Ao0xPju1sNuqphyMnMYkyB3ZLoLtxWpo= github.com/argoproj/pkg v0.13.7-0.20230626144333-d56162821bd1/go.mod h1:CZHlkyAD1/+FbEn6cB2DQTj48IoLGvEYsWEvtzP3238= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= @@ -50,18 +51,23 @@ github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= +github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/bombsimon/logrusr/v2 v2.0.1 h1:1VgxVNQMCvjirZIYaT9JYn6sAVGVEcNtRE0y4mvaOAM= github.com/bombsimon/logrusr/v2 v2.0.1/go.mod h1:ByVAX+vHdLGAfdroiMg6q0zgq2FODY2lc5YJvzmOJio= -github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 h1:R9d0v+iobRHSaE4wKUnXFiZp53AL4ED5MzgEMwGTZag= -github.com/bradleyfalzon/ghinstallation/v2 v2.11.0/go.mod h1:0LWKQwOHewXO/1acI6TtyE0Xc4ObDb2rFN7eHBAG71M= +github.com/bradleyfalzon/ghinstallation/v2 v2.12.0 h1:k8oVjGhZel2qmCUsYwSE34jPNT9DL2wCBOtugsHv26g= +github.com/bradleyfalzon/ghinstallation/v2 v2.12.0/go.mod h1:V4gJcNyAftH0rXpRp1SUVUuh+ACxOH1xOk/ZzkRHltg= github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/casbin/casbin/v2 v2.102.0 h1:weq9iSThUSL21SH3VrwoKa2DgRsaYMfjRNX/yOU3Foo= +github.com/casbin/casbin/v2 v2.102.0/go.mod 
h1:LO7YPez4dX3LgoTCqSQAleQDo0S0BeZBDxYnPUl95Ng= +github.com/casbin/govaluate v1.2.0 h1:wXCXFmqyY+1RwiKfYo3jMKyrtZmOL3kHwaqDyCPOYak= +github.com/casbin/govaluate v1.2.0/go.mod h1:G/UnbIjZk/0uMNaLwZZmFQrR72tYRZWQkO70si/iR7A= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -127,8 +133,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -183,14 +189,15 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod 
h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -227,8 +234,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4= -github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4= +github.com/google/go-github/v66 v66.0.0 h1:ADJsaXj9UotwdgK8/iFZtv7MLc8E8WBl62WLd/D/9+M= +github.com/google/go-github/v66 v66.0.0/go.mod h1:+4SO9Zkuyf8ytMj0csN1NR/5OTR+MfqPp8P8dVlcvY4= github.com/google/go-querystring v1.1.0 
h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -409,8 +416,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= @@ -419,8 +426,8 @@ github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/redis/go-redis/v9 v9.0.0-rc.4/go.mod h1:Vo3EsyWnicKnSKCA7HhgnvnyA74wOA69Cd2Meli5mmA= -github.com/redis/go-redis/v9 v9.6.1 h1:HHDteefn6ZkTtY5fGUE8tj8uy85AHk6zP7CpzIAM0y4= -github.com/redis/go-redis/v9 v9.6.1/go.mod h1:0C0c6ycQsdpVNQpxb1njEQIqkx5UcsM8FJCQLgE9+RA= +github.com/redis/go-redis/v9 v9.7.1 h1:4LhKRCIduqXqtvCUlaq9c8bdHOkICjDMrr1+Zb3osAc= 
+github.com/redis/go-redis/v9 v9.7.1/go.mod h1:f6zhXITC7JUJIlPEiBOTXxJgPLdZcA93GewI7inzyWw= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= @@ -531,8 +538,8 @@ golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5D golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= -golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= +golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -593,8 +600,8 @@ golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod 
h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -705,12 +712,13 @@ golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -747,15 +755,15 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 h1:pPJltXNxVzT4pK9yD8vR9X75DaWYYmLGMsEvBfFQZzQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.66.2 h1:3QdXkuq3Bkh7w+ywLdLvM56cmGvQHUMZpiCzt6Rqaoo= -google.golang.org/grpc v1.66.2/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y= +google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= +google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf 
v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -850,8 +858,9 @@ sigs.k8s.io/kustomize/api v0.17.2/go.mod h1:UWTz9Ct+MvoeQsHcJ5e+vziRRkwimm3HytpZ sigs.k8s.io/kustomize/kyaml v0.17.1 h1:TnxYQxFXzbmNG6gOINgGWQt09GghzgTP6mIurOgrLCQ= sigs.k8s.io/kustomize/kyaml v0.17.1/go.mod h1:9V0mCjIEYjlXuCdYsSXvyoy2BTsLESH7TlGV81S282U= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/structured-merge-diff/v4 v4.4.4-0.20241211184406-7bf59b3d70ee h1:ipT2c6nEOdAfBwiwW1oI0mkrlPabbXEFmJBrg6B+OR8= +sigs.k8s.io/structured-merge-diff/v4 v4.4.4-0.20241211184406-7bf59b3d70ee/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md index 967e06074..9594e1e27 100644 --- a/vendor/cloud.google.com/go/compute/metadata/CHANGES.md +++ b/vendor/cloud.google.com/go/compute/metadata/CHANGES.md @@ -1,5 +1,24 @@ # Changes +## [0.5.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.4.0...compute/metadata/v0.5.0) (2024-07-10) + + +### Features + +* **compute/metadata:** Add sys check for windows OnGCE ([#10521](https://github.com/googleapis/google-cloud-go/issues/10521)) ([3b9a830](https://github.com/googleapis/google-cloud-go/commit/3b9a83063960d2a2ac20beb47cc15818a68bd302)) + +## 
[0.4.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.3.0...compute/metadata/v0.4.0) (2024-07-01) + + +### Features + +* **compute/metadata:** Add context for all functions/methods ([#10370](https://github.com/googleapis/google-cloud-go/issues/10370)) ([66b8efe](https://github.com/googleapis/google-cloud-go/commit/66b8efe7ad877e052b2987bb4475477e38c67bb3)) + + +### Documentation + +* **compute/metadata:** Update OnGCE description ([#10408](https://github.com/googleapis/google-cloud-go/issues/10408)) ([6a46dca](https://github.com/googleapis/google-cloud-go/commit/6a46dca4eae4f88ec6f88822e01e5bf8aeca787f)) + ## [0.3.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.3...compute/metadata/v0.3.0) (2024-04-15) diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go index f67e3c7ee..345080b72 100644 --- a/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -28,7 +28,6 @@ import ( "net/http" "net/url" "os" - "runtime" "strings" "sync" "time" @@ -88,16 +87,16 @@ func (suffix NotDefinedError) Error() string { return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) } -func (c *cachedValue) get(cl *Client) (v string, err error) { +func (c *cachedValue) get(ctx context.Context, cl *Client) (v string, err error) { defer c.mu.Unlock() c.mu.Lock() if c.v != "" { return c.v, nil } if c.trim { - v, err = cl.getTrimmed(context.Background(), c.k) + v, err = cl.getTrimmed(ctx, c.k) } else { - v, err = cl.GetWithContext(context.Background(), c.k) + v, err = cl.GetWithContext(ctx, c.k) } if err == nil { c.v = v @@ -110,7 +109,9 @@ var ( onGCE bool ) -// OnGCE reports whether this process is running on Google Compute Engine. +// OnGCE reports whether this process is running on Google Compute Platforms. 
+// NOTE: True returned from `OnGCE` does not guarantee that the metadata server +// is accessible from this process and have all the metadata defined. func OnGCE() bool { onGCEOnce.Do(initOnGCE) return onGCE @@ -188,21 +189,9 @@ func testOnGCE() bool { return <-resc } -// systemInfoSuggestsGCE reports whether the local system (without -// doing network requests) suggests that we're running on GCE. If this -// returns true, testOnGCE tries a bit harder to reach its metadata -// server. -func systemInfoSuggestsGCE() bool { - if runtime.GOOS != "linux" { - // We don't have any non-Linux clues available, at least yet. - return false - } - slurp, _ := os.ReadFile("/sys/class/dmi/id/product_name") - name := strings.TrimSpace(string(slurp)) - return name == "Google" || name == "Google Compute Engine" -} - // Subscribe calls Client.SubscribeWithContext on the default client. +// +// Deprecated: Please use the context aware variant [SubscribeWithContext]. func Subscribe(suffix string, fn func(v string, ok bool) error) error { return defaultClient.SubscribeWithContext(context.Background(), suffix, func(ctx context.Context, v string, ok bool) error { return fn(v, ok) }) } @@ -225,55 +214,188 @@ func GetWithContext(ctx context.Context, suffix string) (string, error) { } // ProjectID returns the current instance's project ID string. -func ProjectID() (string, error) { return defaultClient.ProjectID() } +// +// Deprecated: Please use the context aware variant [ProjectIDWithContext]. +func ProjectID() (string, error) { + return defaultClient.ProjectIDWithContext(context.Background()) +} + +// ProjectIDWithContext returns the current instance's project ID string. +func ProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.ProjectIDWithContext(ctx) +} // NumericProjectID returns the current instance's numeric project ID. 
-func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } +// +// Deprecated: Please use the context aware variant [NumericProjectIDWithContext]. +func NumericProjectID() (string, error) { + return defaultClient.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func NumericProjectIDWithContext(ctx context.Context) (string, error) { + return defaultClient.NumericProjectIDWithContext(ctx) +} // InternalIP returns the instance's primary internal IP address. -func InternalIP() (string, error) { return defaultClient.InternalIP() } +// +// Deprecated: Please use the context aware variant [InternalIPWithContext]. +func InternalIP() (string, error) { + return defaultClient.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func InternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.InternalIPWithContext(ctx) +} // ExternalIP returns the instance's primary external (public) IP address. -func ExternalIP() (string, error) { return defaultClient.ExternalIP() } +// +// Deprecated: Please use the context aware variant [ExternalIPWithContext]. +func ExternalIP() (string, error) { + return defaultClient.ExternalIPWithContext(context.Background()) +} -// Email calls Client.Email on the default client. -func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) } +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func ExternalIPWithContext(ctx context.Context) (string, error) { + return defaultClient.ExternalIPWithContext(ctx) +} + +// Email calls Client.EmailWithContext on the default client. +// +// Deprecated: Please use the context aware variant [EmailWithContext]. 
+func Email(serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext calls Client.EmailWithContext on the default client. +func EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { + return defaultClient.EmailWithContext(ctx, serviceAccount) +} // Hostname returns the instance's hostname. This will be of the form // ".c..internal". -func Hostname() (string, error) { return defaultClient.Hostname() } +// +// Deprecated: Please use the context aware variant [HostnameWithContext]. +func Hostname() (string, error) { + return defaultClient.HostnameWithContext(context.Background()) +} + +// HostnameWithContext returns the instance's hostname. This will be of the form +// ".c..internal". +func HostnameWithContext(ctx context.Context) (string, error) { + return defaultClient.HostnameWithContext(ctx) +} // InstanceTags returns the list of user-defined instance tags, // assigned when initially creating a GCE instance. -func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } +// +// Deprecated: Please use the context aware variant [InstanceTagsWithContext]. +func InstanceTags() ([]string, error) { + return defaultClient.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTagsWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceTagsWithContext(ctx) +} // InstanceID returns the current VM's numeric instance ID. -func InstanceID() (string, error) { return defaultClient.InstanceID() } +// +// Deprecated: Please use the context aware variant [InstanceIDWithContext]. +func InstanceID() (string, error) { + return defaultClient.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. 
+func InstanceIDWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceIDWithContext(ctx) +} // InstanceName returns the current VM's instance ID string. -func InstanceName() (string, error) { return defaultClient.InstanceName() } +// +// Deprecated: Please use the context aware variant [InstanceNameWithContext]. +func InstanceName() (string, error) { + return defaultClient.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func InstanceNameWithContext(ctx context.Context) (string, error) { + return defaultClient.InstanceNameWithContext(ctx) +} // Zone returns the current VM's zone, such as "us-central1-b". -func Zone() (string, error) { return defaultClient.Zone() } +// +// Deprecated: Please use the context aware variant [ZoneWithContext]. +func Zone() (string, error) { + return defaultClient.ZoneWithContext(context.Background()) +} -// InstanceAttributes calls Client.InstanceAttributes on the default client. -func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func ZoneWithContext(ctx context.Context) (string, error) { + return defaultClient.ZoneWithContext(ctx) +} -// ProjectAttributes calls Client.ProjectAttributes on the default client. -func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } +// InstanceAttributes calls Client.InstanceAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributesWithContext. +func InstanceAttributes() ([]string, error) { + return defaultClient.InstanceAttributesWithContext(context.Background()) +} + +// InstanceAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. 
+func InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.InstanceAttributesWithContext(ctx) +} -// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +// ProjectAttributes calls Client.ProjectAttributesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributesWithContext]. +func ProjectAttributes() ([]string, error) { + return defaultClient.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext calls Client.ProjectAttributesWithContext on the default client. +func ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return defaultClient.ProjectAttributesWithContext(ctx) +} + +// InstanceAttributeValue calls Client.InstanceAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [InstanceAttributeValueWithContext]. func InstanceAttributeValue(attr string) (string, error) { - return defaultClient.InstanceAttributeValue(attr) + return defaultClient.InstanceAttributeValueWithContext(context.Background(), attr) } -// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +// InstanceAttributeValueWithContext calls Client.InstanceAttributeValueWithContext on the default client. +func InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.InstanceAttributeValueWithContext(ctx, attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValueWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ProjectAttributeValueWithContext]. func ProjectAttributeValue(attr string) (string, error) { - return defaultClient.ProjectAttributeValue(attr) + return defaultClient.ProjectAttributeValueWithContext(context.Background(), attr) } -// Scopes calls Client.Scopes on the default client. 
-func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } +// ProjectAttributeValueWithContext calls Client.ProjectAttributeValueWithContext on the default client. +func ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return defaultClient.ProjectAttributeValueWithContext(ctx, attr) +} + +// Scopes calls Client.ScopesWithContext on the default client. +// +// Deprecated: Please use the context aware variant [ScopesWithContext]. +func Scopes(serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext calls Client.ScopesWithContext on the default client. +func ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { + return defaultClient.ScopesWithContext(ctx, serviceAccount) +} func strsContains(ss []string, s string) bool { for _, v := range ss { @@ -296,7 +418,6 @@ func NewClient(c *http.Client) *Client { if c == nil { return defaultClient } - return &Client{hc: c} } @@ -381,6 +502,10 @@ func (c *Client) Get(suffix string) (string, error) { // // If the requested metadata is not defined, the returned error will // be of type NotDefinedError. +// +// NOTE: Without an extra deadline in the context this call can take in the +// worst case, with internal backoff retries, up to 15 seconds (e.g. when server +// is responding slowly). Pass context with additional timeouts when needed. 
func (c *Client) GetWithContext(ctx context.Context, suffix string) (string, error) { val, _, err := c.getETag(ctx, suffix) return val, err @@ -392,8 +517,8 @@ func (c *Client) getTrimmed(ctx context.Context, suffix string) (s string, err e return } -func (c *Client) lines(suffix string) ([]string, error) { - j, err := c.GetWithContext(context.Background(), suffix) +func (c *Client) lines(ctx context.Context, suffix string) ([]string, error) { + j, err := c.GetWithContext(ctx, suffix) if err != nil { return nil, err } @@ -405,45 +530,104 @@ func (c *Client) lines(suffix string) ([]string, error) { } // ProjectID returns the current instance's project ID string. -func (c *Client) ProjectID() (string, error) { return projID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.ProjectIDWithContext]. +func (c *Client) ProjectID() (string, error) { return c.ProjectIDWithContext(context.Background()) } + +// ProjectIDWithContext returns the current instance's project ID string. +func (c *Client) ProjectIDWithContext(ctx context.Context) (string, error) { return projID.get(ctx, c) } // NumericProjectID returns the current instance's numeric project ID. -func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } +// +// Deprecated: Please use the context aware variant [Client.NumericProjectIDWithContext]. +func (c *Client) NumericProjectID() (string, error) { + return c.NumericProjectIDWithContext(context.Background()) +} + +// NumericProjectIDWithContext returns the current instance's numeric project ID. +func (c *Client) NumericProjectIDWithContext(ctx context.Context) (string, error) { + return projNum.get(ctx, c) +} // InstanceID returns the current VM's numeric instance ID. -func (c *Client) InstanceID() (string, error) { return instID.get(c) } +// +// Deprecated: Please use the context aware variant [Client.InstanceIDWithContext]. 
+func (c *Client) InstanceID() (string, error) { + return c.InstanceIDWithContext(context.Background()) +} + +// InstanceIDWithContext returns the current VM's numeric instance ID. +func (c *Client) InstanceIDWithContext(ctx context.Context) (string, error) { + return instID.get(ctx, c) +} // InternalIP returns the instance's primary internal IP address. +// +// Deprecated: Please use the context aware variant [Client.InternalIPWithContext]. func (c *Client) InternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/ip") + return c.InternalIPWithContext(context.Background()) +} + +// InternalIPWithContext returns the instance's primary internal IP address. +func (c *Client) InternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/ip") } // Email returns the email address associated with the service account. -// The account may be empty or the string "default" to use the instance's -// main account. +// +// Deprecated: Please use the context aware variant [Client.EmailWithContext]. func (c *Client) Email(serviceAccount string) (string, error) { + return c.EmailWithContext(context.Background(), serviceAccount) +} + +// EmailWithContext returns the email address associated with the service account. +// The serviceAccount parameter default value (empty string or "default" value) +// will use the instance's main account. +func (c *Client) EmailWithContext(ctx context.Context, serviceAccount string) (string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.getTrimmed(context.Background(), "instance/service-accounts/"+serviceAccount+"/email") + return c.getTrimmed(ctx, "instance/service-accounts/"+serviceAccount+"/email") } // ExternalIP returns the instance's primary external (public) IP address. +// +// Deprecated: Please use the context aware variant [Client.ExternalIPWithContext]. 
func (c *Client) ExternalIP() (string, error) { - return c.getTrimmed(context.Background(), "instance/network-interfaces/0/access-configs/0/external-ip") + return c.ExternalIPWithContext(context.Background()) +} + +// ExternalIPWithContext returns the instance's primary external (public) IP address. +func (c *Client) ExternalIPWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/network-interfaces/0/access-configs/0/external-ip") } // Hostname returns the instance's hostname. This will be of the form // ".c..internal". +// +// Deprecated: Please use the context aware variant [Client.HostnameWithContext]. func (c *Client) Hostname() (string, error) { - return c.getTrimmed(context.Background(), "instance/hostname") + return c.HostnameWithContext(context.Background()) } -// InstanceTags returns the list of user-defined instance tags, -// assigned when initially creating a GCE instance. +// HostnameWithContext returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) HostnameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags. +// +// Deprecated: Please use the context aware variant [Client.InstanceTagsWithContext]. func (c *Client) InstanceTags() ([]string, error) { + return c.InstanceTagsWithContext(context.Background()) +} + +// InstanceTagsWithContext returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTagsWithContext(ctx context.Context) ([]string, error) { var s []string - j, err := c.GetWithContext(context.Background(), "instance/tags") + j, err := c.GetWithContext(ctx, "instance/tags") if err != nil { return nil, err } @@ -454,13 +638,27 @@ func (c *Client) InstanceTags() ([]string, error) { } // InstanceName returns the current VM's instance ID string. 
+// +// Deprecated: Please use the context aware variant [Client.InstanceNameWithContext]. func (c *Client) InstanceName() (string, error) { - return c.getTrimmed(context.Background(), "instance/name") + return c.InstanceNameWithContext(context.Background()) +} + +// InstanceNameWithContext returns the current VM's instance ID string. +func (c *Client) InstanceNameWithContext(ctx context.Context) (string, error) { + return c.getTrimmed(ctx, "instance/name") } // Zone returns the current VM's zone, such as "us-central1-b". +// +// Deprecated: Please use the context aware variant [Client.ZoneWithContext]. func (c *Client) Zone() (string, error) { - zone, err := c.getTrimmed(context.Background(), "instance/zone") + return c.ZoneWithContext(context.Background()) +} + +// ZoneWithContext returns the current VM's zone, such as "us-central1-b". +func (c *Client) ZoneWithContext(ctx context.Context) (string, error) { + zone, err := c.getTrimmed(ctx, "instance/zone") // zone is of the form "projects//zones/". if err != nil { return "", err @@ -471,12 +669,34 @@ func (c *Client) Zone() (string, error) { // InstanceAttributes returns the list of user-defined attributes, // assigned when initially creating a GCE VM instance. The value of an // attribute can be obtained with InstanceAttributeValue. -func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributesWithContext]. +func (c *Client) InstanceAttributes() ([]string, error) { + return c.InstanceAttributesWithContext(context.Background()) +} + +// InstanceAttributesWithContext returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. 
+func (c *Client) InstanceAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "instance/attributes/") +} // ProjectAttributes returns the list of user-defined attributes // applying to the project as a whole, not just this VM. The value of // an attribute can be obtained with ProjectAttributeValue. -func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributesWithContext]. +func (c *Client) ProjectAttributes() ([]string, error) { + return c.ProjectAttributesWithContext(context.Background()) +} + +// ProjectAttributesWithContext returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributesWithContext(ctx context.Context) ([]string, error) { + return c.lines(ctx, "project/attributes/") +} // InstanceAttributeValue returns the value of the provided VM // instance attribute. @@ -486,8 +706,22 @@ func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project // // InstanceAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.InstanceAttributeValueWithContext]. func (c *Client) InstanceAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "instance/attributes/"+attr) + return c.InstanceAttributeValueWithContext(context.Background(), attr) +} + +// InstanceAttributeValueWithContext returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. 
+func (c *Client) InstanceAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "instance/attributes/"+attr) } // ProjectAttributeValue returns the value of the provided @@ -498,18 +732,41 @@ func (c *Client) InstanceAttributeValue(attr string) (string, error) { // // ProjectAttributeValue may return ("", nil) if the attribute was // defined to be the empty string. +// +// Deprecated: Please use the context aware variant [Client.ProjectAttributeValueWithContext]. func (c *Client) ProjectAttributeValue(attr string) (string, error) { - return c.GetWithContext(context.Background(), "project/attributes/"+attr) + return c.ProjectAttributeValueWithContext(context.Background(), attr) +} + +// ProjectAttributeValueWithContext returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValueWithContext(ctx context.Context, attr string) (string, error) { + return c.GetWithContext(ctx, "project/attributes/"+attr) } // Scopes returns the service account scopes for the given account. // The account may be empty or the string "default" to use the instance's // main account. +// +// Deprecated: Please use the context aware variant [Client.ScopesWithContext]. func (c *Client) Scopes(serviceAccount string) ([]string, error) { + return c.ScopesWithContext(context.Background(), serviceAccount) +} + +// ScopesWithContext returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. 
+func (c *Client) ScopesWithContext(ctx context.Context, serviceAccount string) ([]string, error) { if serviceAccount == "" { serviceAccount = "default" } - return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") + return c.lines(ctx, "instance/service-accounts/"+serviceAccount+"/scopes") } // Subscribe subscribes to a value from the metadata service. diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck.go b/vendor/cloud.google.com/go/compute/metadata/syscheck.go new file mode 100644 index 000000000..e0704fa64 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck.go @@ -0,0 +1,26 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !windows && !linux + +package metadata + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. 
+func systemInfoSuggestsGCE() bool { + // We don't currently have checks for other GOOS + return false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go new file mode 100644 index 000000000..74689acbb --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_linux.go @@ -0,0 +1,28 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build linux + +package metadata + +import ( + "os" + "strings" +) + +func systemInfoSuggestsGCE() bool { + b, _ := os.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(b)) + return name == "Google" || name == "Google Compute Engine" +} diff --git a/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go new file mode 100644 index 000000000..c0ce62787 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/syscheck_windows.go @@ -0,0 +1,38 @@ +// Copyright 2024 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build windows + +package metadata + +import ( + "strings" + + "golang.org/x/sys/windows/registry" +) + +func systemInfoSuggestsGCE() bool { + k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SYSTEM\HardwareConfig\Current`, registry.QUERY_VALUE) + if err != nil { + return false + } + defer k.Close() + + s, _, err := k.GetStringValue("SystemProductName") + if err != nil { + return false + } + s = strings.TrimSpace(s) + return strings.HasPrefix(s, "Google") +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/assets/badge.svg b/vendor/github.com/argoproj/argo-cd/v2/assets/badge.svg new file mode 100644 index 000000000..f1dab6b6c --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/assets/badge.svg @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/vendor/github.com/argoproj/argo-cd/v2/assets/builtin-policy.csv b/vendor/github.com/argoproj/argo-cd/v2/assets/builtin-policy.csv new file mode 100644 index 000000000..088f5fbd0 --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/assets/builtin-policy.csv @@ -0,0 +1,52 @@ +# Built-in policy which defines two roles: role:readonly and role:admin, +# and additionally assigns the admin user to the role:admin role. +# There are two policy formats: +# 1. Applications, applicationsets, logs, and exec (which belong to a project): +# p, , , , /, +# 2. 
All other resources: +# p, , , , , + +p, role:readonly, applications, get, */*, allow +p, role:readonly, certificates, get, *, allow +p, role:readonly, clusters, get, *, allow +p, role:readonly, repositories, get, *, allow +p, role:readonly, write-repositories, get, *, allow +p, role:readonly, projects, get, *, allow +p, role:readonly, accounts, get, *, allow +p, role:readonly, gpgkeys, get, *, allow +p, role:readonly, logs, get, */*, allow + +p, role:admin, applications, create, */*, allow +p, role:admin, applications, update, */*, allow +p, role:admin, applications, update/*, */*, allow +p, role:admin, applications, delete, */*, allow +p, role:admin, applications, delete/*, */*, allow +p, role:admin, applications, sync, */*, allow +p, role:admin, applications, override, */*, allow +p, role:admin, applications, action/*, */*, allow +p, role:admin, applicationsets, get, */*, allow +p, role:admin, applicationsets, create, */*, allow +p, role:admin, applicationsets, update, */*, allow +p, role:admin, applicationsets, delete, */*, allow +p, role:admin, certificates, create, *, allow +p, role:admin, certificates, update, *, allow +p, role:admin, certificates, delete, *, allow +p, role:admin, clusters, create, *, allow +p, role:admin, clusters, update, *, allow +p, role:admin, clusters, delete, *, allow +p, role:admin, repositories, create, *, allow +p, role:admin, repositories, update, *, allow +p, role:admin, repositories, delete, *, allow +p, role:admin, write-repositories, create, *, allow +p, role:admin, write-repositories, update, *, allow +p, role:admin, write-repositories, delete, *, allow +p, role:admin, projects, create, *, allow +p, role:admin, projects, update, *, allow +p, role:admin, projects, delete, *, allow +p, role:admin, accounts, update, *, allow +p, role:admin, gpgkeys, create, *, allow +p, role:admin, gpgkeys, delete, *, allow +p, role:admin, exec, create, */*, allow + +g, role:admin, role:readonly +g, admin, role:admin diff --git 
a/vendor/github.com/argoproj/argo-cd/v2/assets/embed.go b/vendor/github.com/argoproj/argo-cd/v2/assets/embed.go new file mode 100644 index 000000000..ac148cafd --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/assets/embed.go @@ -0,0 +1,8 @@ +package assets + +import "embed" + +// Embedded contains embedded assets +// +//go:embed * +var Embedded embed.FS diff --git a/vendor/github.com/argoproj/argo-cd/v2/assets/model.conf b/vendor/github.com/argoproj/argo-cd/v2/assets/model.conf new file mode 100644 index 000000000..e53d9fe89 --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/assets/model.conf @@ -0,0 +1,14 @@ +[request_definition] +r = sub, res, act, obj + +[policy_definition] +p = sub, res, act, obj, eft + +[role_definition] +g = _, _ + +[policy_effect] +e = some(where (p.eft == allow)) && !some(where (p.eft == deny)) + +[matchers] +m = g(r.sub, p.sub) && globOrRegexMatch(r.res, p.res) && globOrRegexMatch(r.act, p.act) && globOrRegexMatch(r.obj, p.obj) diff --git a/vendor/github.com/argoproj/argo-cd/v2/assets/swagger.json b/vendor/github.com/argoproj/argo-cd/v2/assets/swagger.json new file mode 100644 index 000000000..9436a6a6f --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/assets/swagger.json @@ -0,0 +1,10209 @@ +{ + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "swagger": "2.0", + "info": { + "description": "Description of all APIs", + "title": "Consolidate Services", + "version": "version not set" + }, + "paths": { + "/api/v1/account": { + "get": { + "tags": [ + "AccountService" + ], + "summary": "ListAccounts returns the list of accounts", + "operationId": "AccountService_ListAccounts", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/accountAccountsList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + 
"/api/v1/account/can-i/{resource}/{action}/{subresource}": { + "get": { + "tags": [ + "AccountService" + ], + "summary": "CanI checks if the current account has permission to perform an action", + "operationId": "AccountService_CanI", + "parameters": [ + { + "type": "string", + "name": "resource", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "action", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "subresource", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/accountCanIResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/account/password": { + "put": { + "tags": [ + "AccountService" + ], + "summary": "UpdatePassword updates an account's password to a new value", + "operationId": "AccountService_UpdatePassword", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/accountUpdatePasswordRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/accountUpdatePasswordResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/account/{name}": { + "get": { + "tags": [ + "AccountService" + ], + "summary": "GetAccount returns an account", + "operationId": "AccountService_GetAccount", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/accountAccount" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + 
"/api/v1/account/{name}/token": { + "post": { + "tags": [ + "AccountService" + ], + "summary": "CreateToken creates a token", + "operationId": "AccountService_CreateToken", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/accountCreateTokenRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/accountCreateTokenResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/account/{name}/token/{id}": { + "delete": { + "tags": [ + "AccountService" + ], + "summary": "DeleteToken deletes a token", + "operationId": "AccountService_DeleteToken", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/accountEmptyResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "List returns list of applications", + "operationId": "ApplicationService_List", + "parameters": [ + { + "type": "string", + "description": "the application's name.", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "forces application reconciliation if set to 'hard'.", + "name": "refresh", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applications.", + "name": "projects", + "in": "query" + }, + { + 
"type": "string", + "description": "when specified with a watch call, shows changes that occur after that particular version of a resource.", + "name": "resourceVersion", + "in": "query" + }, + { + "type": "string", + "description": "the selector to restrict returned list to applications only with matched labels.", + "name": "selector", + "in": "query" + }, + { + "type": "string", + "description": "the repoURL to restrict returned list applications.", + "name": "repo", + "in": "query" + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "ApplicationService" + ], + "summary": "Create creates an application", + "operationId": "ApplicationService_Create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + { + "type": "boolean", + "name": "upsert", + "in": "query" + }, + { + "type": "boolean", + "name": "validate", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/manifestsWithFiles": { + "post": { + "tags": [ + "ApplicationService" + ], + "summary": "GetManifestsWithFiles returns 
application manifests using provided files to generate them", + "operationId": "ApplicationService_GetManifestsWithFiles", + "parameters": [ + { + "description": " (streaming inputs)", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/applicationApplicationManifestQueryWithFilesWrapper" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryManifestResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{application.metadata.name}": { + "put": { + "tags": [ + "ApplicationService" + ], + "summary": "Update updates an application", + "operationId": "ApplicationService_Update", + "parameters": [ + { + "type": "string", + "description": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. 
Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names\n+optional", + "name": "application.metadata.name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + { + "type": "boolean", + "name": "validate", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{applicationName}/managed-resources": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "ManagedResources returns list of managed resources", + "operationId": "ApplicationService_ManagedResources", + "parameters": [ + { + "type": "string", + "name": "applicationName", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationManagedResourcesResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{applicationName}/resource-tree": { + 
"get": { + "tags": [ + "ApplicationService" + ], + "summary": "ResourceTree returns resource tree", + "operationId": "ApplicationService_ResourceTree", + "parameters": [ + { + "type": "string", + "name": "applicationName", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationTree" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "Get returns an application by name", + "operationId": "ApplicationService_Get", + "parameters": [ + { + "type": "string", + "description": "the application's name", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "forces application reconciliation if set to 'hard'.", + "name": "refresh", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applications.", + "name": "projects", + "in": "query" + }, + { + "type": "string", + "description": "when specified with a watch call, shows changes that occur after that particular version of a resource.", + "name": "resourceVersion", + "in": "query" + }, + { + "type": "string", + "description": "the selector to restrict returned list to applications only with matched 
labels.", + "name": "selector", + "in": "query" + }, + { + "type": "string", + "description": "the repoURL to restrict returned list applications.", + "name": "repo", + "in": "query" + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "ApplicationService" + ], + "summary": "Delete deletes an application", + "operationId": "ApplicationService_Delete", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "boolean", + "name": "cascade", + "in": "query" + }, + { + "type": "string", + "name": "propagationPolicy", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationApplicationResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "patch": { + "tags": [ + "ApplicationService" + ], + "summary": "Patch patch an application", + "operationId": "ApplicationService_Patch", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": 
"#/definitions/applicationApplicationPatchRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/events": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "ListResourceEvents returns a list of event resources", + "operationId": "ApplicationService_ListResourceEvents", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "resourceNamespace", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "string", + "name": "resourceUID", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1EventList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/links": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "ListLinks returns the list of all application deep links", + "operationId": "ApplicationService_ListLinks", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationLinksResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": 
"#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/logs": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "PodLogs returns stream of log entries for the specified pod. Pod", + "operationId": "ApplicationService_PodLogs2", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "podName", + "in": "query" + }, + { + "type": "string", + "name": "container", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "name": "sinceSeconds", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", + "name": "sinceTime.seconds", + "in": "query" + }, + { + "type": "integer", + "format": "int32", + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. 
This field may be limited in precision depending on context.", + "name": "sinceTime.nanos", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "name": "tailLines", + "in": "query" + }, + { + "type": "boolean", + "name": "follow", + "in": "query" + }, + { + "type": "string", + "name": "untilTime", + "in": "query" + }, + { + "type": "string", + "name": "filter", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "boolean", + "name": "previous", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.(streaming responses)", + "schema": { + "type": "object", + "title": "Stream result of applicationLogEntry", + "properties": { + "error": { + "$ref": "#/definitions/runtimeStreamError" + }, + "result": { + "$ref": "#/definitions/applicationLogEntry" + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/manifests": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "GetManifests returns application manifests", + "operationId": "ApplicationService_GetManifests", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "revision", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string", + "format": "int64" + }, + "collectionFormat": "multi", + "name": "sourcePositions", + "in": "query" + }, + { + "type": "array", + "items": { + 
"type": "string" + }, + "collectionFormat": "multi", + "name": "revisions", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryManifestResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/operation": { + "delete": { + "tags": [ + "ApplicationService" + ], + "summary": "TerminateOperation terminates the currently running operation", + "operationId": "ApplicationService_TerminateOperation", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationOperationTerminateResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/pods/{podName}/logs": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "PodLogs returns stream of log entries for the specified pod. Pod", + "operationId": "ApplicationService_PodLogs", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "podName", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "container", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "name": "sinceSeconds", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. 
Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", + "name": "sinceTime.seconds", + "in": "query" + }, + { + "type": "integer", + "format": "int32", + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. This field may be limited in precision depending on context.", + "name": "sinceTime.nanos", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "name": "tailLines", + "in": "query" + }, + { + "type": "boolean", + "name": "follow", + "in": "query" + }, + { + "type": "string", + "name": "untilTime", + "in": "query" + }, + { + "type": "string", + "name": "filter", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "boolean", + "name": "previous", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.(streaming responses)", + "schema": { + "type": "object", + "title": "Stream result of applicationLogEntry", + "properties": { + "error": { + "$ref": "#/definitions/runtimeStreamError" + }, + "result": { + "$ref": "#/definitions/applicationLogEntry" + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/resource": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "GetResource returns single application resource", + "operationId": "ApplicationService_GetResource", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + 
"type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationApplicationResourceResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "ApplicationService" + ], + "summary": "PatchResource patch single application resource", + "operationId": "ApplicationService_PatchResource", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "string" + } + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "patchType", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationApplicationResourceResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + 
"ApplicationService" + ], + "summary": "DeleteResource deletes a single application resource", + "operationId": "ApplicationService_DeleteResource", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "boolean", + "name": "force", + "in": "query" + }, + { + "type": "boolean", + "name": "orphan", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationApplicationResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/resource/actions": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "ListResourceActions returns list of resource actions", + "operationId": "ApplicationService_ListResourceActions", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A 
successful response.", + "schema": { + "$ref": "#/definitions/applicationResourceActionsListResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "ApplicationService" + ], + "summary": "RunResourceAction run resource action", + "operationId": "ApplicationService_RunResourceAction", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "string" + } + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationApplicationResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/resource/links": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "ListResourceLinks returns the list of all resource deep links", + "operationId": "ApplicationService_ListResourceLinks", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "resourceName", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": 
"string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationLinksResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/revisions/{revision}/chartdetails": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "Get the chart metadata (description, maintainers, home) for a specific revision of the application", + "operationId": "ApplicationService_RevisionChartDetails", + "parameters": [ + { + "type": "string", + "description": "the application's name", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "the revision of the app", + "name": "revision", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + }, + { + "type": "integer", + "format": "int32", + "description": "source index (for multi source apps).", + "name": "sourceIndex", + "in": "query" + }, + { + "type": "integer", + "format": "int32", + "description": "versionId from historical data (for multi source apps).", + "name": "versionId", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ChartDetails" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/revisions/{revision}/metadata": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "Get the meta-data (author, date, tags, 
message) for a specific revision of the application", + "operationId": "ApplicationService_RevisionMetadata", + "parameters": [ + { + "type": "string", + "description": "the application's name", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "the revision of the app", + "name": "revision", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + }, + { + "type": "integer", + "format": "int32", + "description": "source index (for multi source apps).", + "name": "sourceIndex", + "in": "query" + }, + { + "type": "integer", + "format": "int32", + "description": "versionId from historical data (for multi source apps).", + "name": "versionId", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RevisionMetadata" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/rollback": { + "post": { + "tags": [ + "ApplicationService" + ], + "summary": "Rollback syncs an application to its target state", + "operationId": "ApplicationService_Rollback", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/applicationApplicationRollbackRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/spec": { + "put": { + "tags": [ + "ApplicationService" + 
], + "summary": "UpdateSpec updates an application spec", + "operationId": "ApplicationService_UpdateSpec", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationSpec" + } + }, + { + "type": "boolean", + "name": "validate", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationSpec" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/sync": { + "post": { + "tags": [ + "ApplicationService" + ], + "summary": "Sync syncs an application to its target state", + "operationId": "ApplicationService_Sync", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/applicationApplicationSyncRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applications/{name}/syncwindows": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "Get returns sync windows of the application", + "operationId": "ApplicationService_GetApplicationSyncWindows", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": 
"query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationApplicationSyncWindowsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applicationsets": { + "get": { + "tags": [ + "ApplicationSetService" + ], + "summary": "List returns list of applicationset", + "operationId": "ApplicationSetService_List", + "parameters": [ + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applicationsets.", + "name": "projects", + "in": "query" + }, + { + "type": "string", + "description": "the selector to restrict returned list to applications only with matched labels.", + "name": "selector", + "in": "query" + }, + { + "type": "string", + "description": "The application set namespace. Default empty is argocd control plane namespace.", + "name": "appsetNamespace", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationSetList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "ApplicationSetService" + ], + "summary": "Create creates an applicationset", + "operationId": "ApplicationSetService_Create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationSet" + } + }, + { + "type": "boolean", + "name": "upsert", + "in": "query" + }, + { + "type": "boolean", + "name": "dryRun", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationSet" + } + }, + "default": { + "description": "An unexpected error 
response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applicationsets/generate": { + "post": { + "tags": [ + "ApplicationSetService" + ], + "summary": "Generate generates", + "operationId": "ApplicationSetService_Generate", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/applicationsetApplicationSetGenerateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationsetApplicationSetGenerateResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applicationsets/{name}": { + "get": { + "tags": [ + "ApplicationSetService" + ], + "summary": "Get returns an applicationset by name", + "operationId": "ApplicationSetService_Get", + "parameters": [ + { + "type": "string", + "description": "the applicationsets's name", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The application set namespace. Default empty is argocd control plane namespace.", + "name": "appsetNamespace", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationSet" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "ApplicationSetService" + ], + "summary": "Delete deletes an application set", + "operationId": "ApplicationSetService_Delete", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The application set namespace. 
Default empty is argocd control plane namespace.", + "name": "appsetNamespace", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationsetApplicationSetResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/applicationsets/{name}/resource-tree": { + "get": { + "tags": [ + "ApplicationSetService" + ], + "summary": "ResourceTree returns resource tree", + "operationId": "ApplicationSetService_ResourceTree", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + }, + { + "type": "string", + "description": "The application set namespace. Default empty is argocd control plane namespace.", + "name": "appsetNamespace", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ApplicationSetTree" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/certificates": { + "get": { + "tags": [ + "CertificateService" + ], + "summary": "List all available repository certificates", + "operationId": "CertificateService_ListCertificates", + "parameters": [ + { + "type": "string", + "description": "A file-glob pattern (not regular expression) the host name has to match.", + "name": "hostNamePattern", + "in": "query" + }, + { + "type": "string", + "description": "The type of the certificate to match (ssh or https).", + "name": "certType", + "in": "query" + }, + { + "type": "string", + "description": "The sub type of the certificate to match (protocol dependent, usually only used for ssh certs).", + "name": "certSubType", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": 
"#/definitions/v1alpha1RepositoryCertificateList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "CertificateService" + ], + "summary": "Creates repository certificates on the server", + "operationId": "CertificateService_CreateCertificate", + "parameters": [ + { + "description": "List of certificates to be created", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1RepositoryCertificateList" + } + }, + { + "type": "boolean", + "description": "Whether to upsert already existing certificates.", + "name": "upsert", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepositoryCertificateList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "CertificateService" + ], + "summary": "Delete the certificates that match the RepositoryCertificateQuery", + "operationId": "CertificateService_DeleteCertificate", + "parameters": [ + { + "type": "string", + "description": "A file-glob pattern (not regular expression) the host name has to match.", + "name": "hostNamePattern", + "in": "query" + }, + { + "type": "string", + "description": "The type of the certificate to match (ssh or https).", + "name": "certType", + "in": "query" + }, + { + "type": "string", + "description": "The sub type of the certificate to match (protocol dependent, usually only used for ssh certs).", + "name": "certSubType", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepositoryCertificateList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } 
+ } + }, + "/api/v1/clusters": { + "get": { + "tags": [ + "ClusterService" + ], + "summary": "List returns list of clusters", + "operationId": "ClusterService_List", + "parameters": [ + { + "type": "string", + "name": "server", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" ).", + "name": "id.type", + "in": "query" + }, + { + "type": "string", + "description": "value holds the cluster server URL or cluster name.", + "name": "id.value", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1ClusterList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "ClusterService" + ], + "summary": "Create creates a cluster", + "operationId": "ClusterService_Create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + { + "type": "boolean", + "name": "upsert", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/clusters/{id.value}": { + "get": { + "tags": [ + "ClusterService" + ], + "summary": "Get returns a cluster by server address", + "operationId": "ClusterService_Get", + "parameters": [ + { + "type": "string", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "server", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + 
}, + { + "type": "string", + "description": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" ).", + "name": "id.type", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "put": { + "tags": [ + "ClusterService" + ], + "summary": "Update updates a cluster", + "operationId": "ClusterService_Update", + "parameters": [ + { + "type": "string", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "name": "updatedFields", + "in": "query" + }, + { + "type": "string", + "description": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" ).", + "name": "id.type", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "ClusterService" + ], + "summary": "Delete deletes a cluster", + "operationId": "ClusterService_Delete", + "parameters": [ + { + "type": "string", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "server", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "type is the type of the specified cluster 
identifier ( \"server\" - default, \"name\" ).", + "name": "id.type", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/clusterClusterResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/clusters/{id.value}/invalidate-cache": { + "post": { + "tags": [ + "ClusterService" + ], + "summary": "InvalidateCache invalidates cluster cache", + "operationId": "ClusterService_InvalidateCache", + "parameters": [ + { + "type": "string", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/clusters/{id.value}/rotate-auth": { + "post": { + "tags": [ + "ClusterService" + ], + "summary": "RotateAuth rotates the bearer token used for a cluster", + "operationId": "ClusterService_RotateAuth", + "parameters": [ + { + "type": "string", + "description": "value holds the cluster server URL or cluster name", + "name": "id.value", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/clusterClusterResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/gpgkeys": { + "get": { + "tags": [ + "GPGKeyService" + ], + "summary": "List all available repository certificates", + "operationId": "GPGKeyService_List", + "parameters": [ + { + "type": "string", + "description": "The GPG key ID to query for.", + "name": "keyID", + "in": 
"query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1GnuPGPublicKeyList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "GPGKeyService" + ], + "summary": "Create one or more GPG public keys in the server's configuration", + "operationId": "GPGKeyService_Create", + "parameters": [ + { + "description": "Raw key data of the GPG key(s) to create", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1GnuPGPublicKey" + } + }, + { + "type": "boolean", + "description": "Whether to upsert already existing public keys.", + "name": "upsert", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/gpgkeyGnuPGPublicKeyCreateResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "GPGKeyService" + ], + "summary": "Delete specified GPG public key from the server's configuration", + "operationId": "GPGKeyService_Delete", + "parameters": [ + { + "type": "string", + "description": "The GPG key ID to query for.", + "name": "keyID", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/gpgkeyGnuPGPublicKeyResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/gpgkeys/{keyID}": { + "get": { + "tags": [ + "GPGKeyService" + ], + "summary": "Get information about specified GPG public key from the server", + "operationId": "GPGKeyService_Get", + "parameters": [ + { + "type": "string", + "description": "The GPG key ID to query for", + "name": 
"keyID", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1GnuPGPublicKey" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/notifications/services": { + "get": { + "tags": [ + "NotificationService" + ], + "summary": "List returns list of services", + "operationId": "NotificationService_ListServices", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/notificationServiceList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/notifications/templates": { + "get": { + "tags": [ + "NotificationService" + ], + "summary": "List returns list of templates", + "operationId": "NotificationService_ListTemplates", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/notificationTemplateList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/notifications/triggers": { + "get": { + "tags": [ + "NotificationService" + ], + "summary": "List returns list of triggers", + "operationId": "NotificationService_ListTriggers", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/notificationTriggerList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects": { + "get": { + "tags": [ + "ProjectService" + ], + "summary": "List returns list of projects", + "operationId": "ProjectService_List", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "query" + } + ], + 
"responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1AppProjectList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "ProjectService" + ], + "summary": "Create a new project", + "operationId": "ProjectService_Create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/projectProjectCreateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1AppProject" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{name}": { + "get": { + "tags": [ + "ProjectService" + ], + "summary": "Get returns a project by name", + "operationId": "ProjectService_Get", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1AppProject" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "ProjectService" + ], + "summary": "Delete deletes a project", + "operationId": "ProjectService_Delete", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/projectEmptyResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{name}/detailed": { + "get": { + "tags": [ + "ProjectService" + ], + "summary": 
"GetDetailedProject returns a project that include project, global project and scoped resources by name", + "operationId": "ProjectService_GetDetailedProject", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/projectDetailedProjectsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{name}/events": { + "get": { + "tags": [ + "ProjectService" + ], + "summary": "ListEvents returns a list of project events", + "operationId": "ProjectService_ListEvents", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1EventList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{name}/globalprojects": { + "get": { + "tags": [ + "ProjectService" + ], + "summary": "Get returns a virtual project by name", + "operationId": "ProjectService_GetGlobalProjects", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/projectGlobalProjectsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{name}/links": { + "get": { + "tags": [ + "ProjectService" + ], + "summary": "ListLinks returns all deep links for the particular project", + "operationId": "ProjectService_ListLinks", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + 
"required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/applicationLinksResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{name}/syncwindows": { + "get": { + "tags": [ + "ProjectService" + ], + "summary": "GetSchedulesState returns true if there are any active sync syncWindows", + "operationId": "ProjectService_GetSyncWindowsState", + "parameters": [ + { + "type": "string", + "name": "name", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/projectSyncWindowsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{project.metadata.name}": { + "put": { + "tags": [ + "ProjectService" + ], + "summary": "Update updates a project", + "operationId": "ProjectService_Update", + "parameters": [ + { + "type": "string", + "description": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. 
Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names\n+optional", + "name": "project.metadata.name", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/projectProjectUpdateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1AppProject" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{project}/roles/{role}/token": { + "post": { + "tags": [ + "ProjectService" + ], + "summary": "Create a new project token", + "operationId": "ProjectService_CreateToken", + "parameters": [ + { + "type": "string", + "name": "project", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "role", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/projectProjectTokenCreateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/projectProjectTokenResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/projects/{project}/roles/{role}/token/{iat}": { + "delete": { + "tags": [ + "ProjectService" + ], + "summary": "Delete a new project token", + "operationId": "ProjectService_DeleteToken", + "parameters": [ + { + "type": "string", + "name": "project", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "role", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "int64", + "name": "iat", + "in": "path", + "required": true + }, + { + 
"type": "string", + "name": "id", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/projectEmptyResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repocreds": { + "get": { + "tags": [ + "RepoCredsService" + ], + "summary": "ListRepositoryCredentials gets a list of all configured repository credential sets", + "operationId": "RepoCredsService_ListRepositoryCredentials", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query.", + "name": "url", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepoCredsList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "RepoCredsService" + ], + "summary": "CreateRepositoryCredentials creates a new repository credential set", + "operationId": "RepoCredsService_CreateRepositoryCredentials", + "parameters": [ + { + "description": "Repository definition", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } + }, + { + "type": "boolean", + "description": "Whether to create in upsert mode.", + "name": "upsert", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repocreds/{creds.url}": { + "put": { + "tags": [ + "RepoCredsService" + ], + "summary": "UpdateRepositoryCredentials updates a repository credential set", + "operationId": "RepoCredsService_UpdateRepositoryCredentials", + 
"parameters": [ + { + "type": "string", + "description": "URL is the URL to which these credentials match", + "name": "creds.url", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repocreds/{url}": { + "delete": { + "tags": [ + "RepoCredsService" + ], + "summary": "DeleteRepositoryCredentials deletes a repository credential set from the configuration", + "operationId": "RepoCredsService_DeleteRepositoryCredentials", + "parameters": [ + { + "type": "string", + "name": "url", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repocredsRepoCredsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories": { + "get": { + "tags": [ + "RepositoryService" + ], + "summary": "ListRepositories gets a list of all configured repositories", + "operationId": "RepositoryService_ListRepositories", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query.", + "name": "repo", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + }, + { + "type": "string", + "description": "App project for query.", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepositoryList" + } + }, + "default": { + "description": "An 
unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "RepositoryService" + ], + "summary": "CreateRepository creates a new repository configuration", + "operationId": "RepositoryService_CreateRepository", + "parameters": [ + { + "description": "Repository definition", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + { + "type": "boolean", + "description": "Whether to create in upsert mode.", + "name": "upsert", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to operate on credential set instead of repository.", + "name": "credsOnly", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories/{repo.repo}": { + "put": { + "tags": [ + "RepositoryService" + ], + "summary": "UpdateRepository updates a repository configuration", + "operationId": "RepositoryService_UpdateRepository", + "parameters": [ + { + "type": "string", + "description": "Repo contains the URL to the remote repository", + "name": "repo.repo", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories/{repo}": { + "get": { + "tags": [ + "RepositoryService" + ], + "summary": "Get returns a repository or its credentials", + "operationId": "RepositoryService_Get", + 
"parameters": [ + { + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + }, + { + "type": "string", + "description": "App project for query.", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "RepositoryService" + ], + "summary": "DeleteRepository deletes a repository from the configuration", + "operationId": "RepositoryService_DeleteRepository", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + }, + { + "type": "string", + "description": "App project for query.", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryRepoResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories/{repo}/apps": { + "get": { + "tags": [ + "RepositoryService" + ], + "summary": "ListApps returns list of apps in the repo", + "operationId": "RepositoryService_ListApps", + "parameters": [ + { + "type": "string", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "revision", + "in": "query" + }, + { + "type": "string", + "name": "appName", + "in": "query" + }, + { + "type": 
"string", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryRepoAppsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories/{repo}/helmcharts": { + "get": { + "tags": [ + "RepositoryService" + ], + "summary": "GetHelmCharts returns list of helm charts in the specified repository", + "operationId": "RepositoryService_GetHelmCharts", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + }, + { + "type": "string", + "description": "App project for query.", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryHelmChartsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories/{repo}/refs": { + "get": { + "tags": [ + "RepositoryService" + ], + "operationId": "RepositoryService_ListRefs", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + }, + { + "type": "string", + "description": "App project for query.", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryRefs" + } + }, + "default": { + "description": "An 
unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories/{repo}/validate": { + "post": { + "tags": [ + "RepositoryService" + ], + "summary": "ValidateAccess validates access to a repository with given parameters", + "operationId": "RepositoryService_ValidateAccess", + "parameters": [ + { + "type": "string", + "description": "The URL to the repo", + "name": "repo", + "in": "path", + "required": true + }, + { + "description": "The URL to the repo", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "string" + } + }, + { + "type": "string", + "description": "Username for accessing repo.", + "name": "username", + "in": "query" + }, + { + "type": "string", + "description": "Password for accessing repo.", + "name": "password", + "in": "query" + }, + { + "type": "string", + "description": "Private key data for accessing SSH repository.", + "name": "sshPrivateKey", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to skip certificate or host key validation.", + "name": "insecure", + "in": "query" + }, + { + "type": "string", + "description": "TLS client cert data for accessing HTTPS repository.", + "name": "tlsClientCertData", + "in": "query" + }, + { + "type": "string", + "description": "TLS client cert key for accessing HTTPS repository.", + "name": "tlsClientCertKey", + "in": "query" + }, + { + "type": "string", + "description": "The type of the repo.", + "name": "type", + "in": "query" + }, + { + "type": "string", + "description": "The name of the repo.", + "name": "name", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether helm-oci support should be enabled for this repo.", + "name": "enableOci", + "in": "query" + }, + { + "type": "string", + "description": "Github App Private Key PEM data.", + "name": "githubAppPrivateKey", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Github App ID of the app 
used to access the repo.", + "name": "githubAppID", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Github App Installation ID of the installed GitHub App.", + "name": "githubAppInstallationID", + "in": "query" + }, + { + "type": "string", + "description": "Github App Enterprise base url if empty will default to https://api.github.com.", + "name": "githubAppEnterpriseBaseUrl", + "in": "query" + }, + { + "type": "string", + "description": "HTTP/HTTPS proxy to access the repository.", + "name": "proxy", + "in": "query" + }, + { + "type": "string", + "description": "Reference between project and repository that allow you automatically to be added as item inside SourceRepos project entity.", + "name": "project", + "in": "query" + }, + { + "type": "string", + "description": "Google Cloud Platform service account key.", + "name": "gcpServiceAccountKey", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to force HTTP basic auth.", + "name": "forceHttpBasicAuth", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryRepoResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/repositories/{source.repoURL}/appdetails": { + "post": { + "tags": [ + "RepositoryService" + ], + "summary": "GetAppDetails returns application details by given path", + "operationId": "RepositoryService_GetAppDetails", + "parameters": [ + { + "type": "string", + "description": "RepoURL is the URL to the repository (Git or Helm) that contains the application manifests", + "name": "source.repoURL", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/repositoryRepoAppDetailsQuery" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", 
+ "schema": { + "$ref": "#/definitions/repositoryRepoAppDetailsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/session": { + "post": { + "tags": [ + "SessionService" + ], + "summary": "Create a new JWT for authentication and set a cookie if using HTTP", + "operationId": "SessionService_Create", + "parameters": [ + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/sessionSessionCreateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sessionSessionResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "SessionService" + ], + "summary": "Delete an existing JWT cookie if using HTTP", + "operationId": "SessionService_Delete", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sessionSessionResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/session/userinfo": { + "get": { + "tags": [ + "SessionService" + ], + "summary": "Get the current user's info", + "operationId": "SessionService_GetUserInfo", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/sessionGetUserInfoResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/settings": { + "get": { + "tags": [ + "SettingsService" + ], + "summary": "Get returns Argo CD settings", + "operationId": "SettingsService_Get", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": 
"#/definitions/clusterSettings" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/settings/plugins": { + "get": { + "tags": [ + "SettingsService" + ], + "summary": "Get returns Argo CD plugins", + "operationId": "SettingsService_GetPlugins", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/clusterSettingsPluginsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/stream/applications": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "Watch returns stream of application change events", + "operationId": "ApplicationService_Watch", + "parameters": [ + { + "type": "string", + "description": "the application's name.", + "name": "name", + "in": "query" + }, + { + "type": "string", + "description": "forces application reconciliation if set to 'hard'.", + "name": "refresh", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi", + "description": "the project names to restrict returned list applications.", + "name": "projects", + "in": "query" + }, + { + "type": "string", + "description": "when specified with a watch call, shows changes that occur after that particular version of a resource.", + "name": "resourceVersion", + "in": "query" + }, + { + "type": "string", + "description": "the selector to restrict returned list to applications only with matched labels.", + "name": "selector", + "in": "query" + }, + { + "type": "string", + "description": "the repoURL to restrict returned list applications.", + "name": "repo", + "in": "query" + }, + { + "type": "string", + "description": "the application's namespace.", + "name": "appNamespace", + "in": "query" + }, + { + "type": "array", + "items": { + "type": "string" + }, + 
"collectionFormat": "multi", + "description": "the project names to restrict returned list applications (legacy name for backwards-compatibility).", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.(streaming responses)", + "schema": { + "type": "object", + "title": "Stream result of v1alpha1ApplicationWatchEvent", + "properties": { + "error": { + "$ref": "#/definitions/runtimeStreamError" + }, + "result": { + "$ref": "#/definitions/v1alpha1ApplicationWatchEvent" + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/stream/applications/{applicationName}/resource-tree": { + "get": { + "tags": [ + "ApplicationService" + ], + "summary": "Watch returns stream of application resource tree", + "operationId": "ApplicationService_WatchResourceTree", + "parameters": [ + { + "type": "string", + "name": "applicationName", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "namespace", + "in": "query" + }, + { + "type": "string", + "name": "name", + "in": "query" + }, + { + "type": "string", + "name": "version", + "in": "query" + }, + { + "type": "string", + "name": "group", + "in": "query" + }, + { + "type": "string", + "name": "kind", + "in": "query" + }, + { + "type": "string", + "name": "appNamespace", + "in": "query" + }, + { + "type": "string", + "name": "project", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.(streaming responses)", + "schema": { + "type": "object", + "title": "Stream result of v1alpha1ApplicationTree", + "properties": { + "error": { + "$ref": "#/definitions/runtimeStreamError" + }, + "result": { + "$ref": "#/definitions/v1alpha1ApplicationTree" + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + 
"/api/v1/write-repocreds": { + "get": { + "tags": [ + "RepoCredsService" + ], + "summary": "ListWriteRepositoryCredentials gets a list of all configured repository credential sets that have write access", + "operationId": "RepoCredsService_ListWriteRepositoryCredentials", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query.", + "name": "url", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepoCredsList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "RepoCredsService" + ], + "summary": "CreateWriteRepositoryCredentials creates a new repository credential set with write access", + "operationId": "RepoCredsService_CreateWriteRepositoryCredentials", + "parameters": [ + { + "description": "Repository definition", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } + }, + { + "type": "boolean", + "description": "Whether to create in upsert mode.", + "name": "upsert", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/write-repocreds/{creds.url}": { + "put": { + "tags": [ + "RepoCredsService" + ], + "summary": "UpdateWriteRepositoryCredentials updates a repository credential set with write access", + "operationId": "RepoCredsService_UpdateWriteRepositoryCredentials", + "parameters": [ + { + "type": "string", + "description": "URL is the URL to which these credentials match", + "name": "creds.url", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + 
"$ref": "#/definitions/v1alpha1RepoCreds" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/write-repocreds/{url}": { + "delete": { + "tags": [ + "RepoCredsService" + ], + "summary": "DeleteWriteRepositoryCredentials deletes a repository credential set with write access from the configuration", + "operationId": "RepoCredsService_DeleteWriteRepositoryCredentials", + "parameters": [ + { + "type": "string", + "name": "url", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repocredsRepoCredsResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/write-repositories": { + "get": { + "tags": [ + "RepositoryService" + ], + "summary": "ListWriteRepositories gets a list of all configured write repositories", + "operationId": "RepositoryService_ListWriteRepositories", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query.", + "name": "repo", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + }, + { + "type": "string", + "description": "App project for query.", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1RepositoryList" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "post": { + "tags": [ + "RepositoryService" + ], + "summary": "CreateWriteRepository creates 
a new write repository configuration", + "operationId": "RepositoryService_CreateWriteRepository", + "parameters": [ + { + "description": "Repository definition", + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + { + "type": "boolean", + "description": "Whether to create in upsert mode.", + "name": "upsert", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to operate on credential set instead of repository.", + "name": "credsOnly", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/write-repositories/{repo.repo}": { + "put": { + "tags": [ + "RepositoryService" + ], + "summary": "UpdateWriteRepository updates a write repository configuration", + "operationId": "RepositoryService_UpdateWriteRepository", + "parameters": [ + { + "type": "string", + "description": "Repo contains the URL to the remote repository", + "name": "repo.repo", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/write-repositories/{repo}": { + "get": { + "tags": [ + "RepositoryService" + ], + "summary": "GetWrite returns a repository or its write credentials", + "operationId": "RepositoryService_GetWrite", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, 
+ { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + }, + { + "type": "string", + "description": "App project for query.", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + }, + "delete": { + "tags": [ + "RepositoryService" + ], + "summary": "DeleteWriteRepository deletes a write repository from the configuration", + "operationId": "RepositoryService_DeleteWriteRepository", + "parameters": [ + { + "type": "string", + "description": "Repo URL for query", + "name": "repo", + "in": "path", + "required": true + }, + { + "type": "boolean", + "description": "Whether to force a cache refresh on repo's connection state.", + "name": "forceRefresh", + "in": "query" + }, + { + "type": "string", + "description": "App project for query.", + "name": "appProject", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryRepoResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/v1/write-repositories/{repo}/validate": { + "post": { + "tags": [ + "RepositoryService" + ], + "summary": "ValidateWriteAccess validates write access to a repository with given parameters", + "operationId": "RepositoryService_ValidateWriteAccess", + "parameters": [ + { + "type": "string", + "description": "The URL to the repo", + "name": "repo", + "in": "path", + "required": true + }, + { + "description": "The URL to the repo", + "name": "body", + "in": "body", + "required": true, + "schema": { + "type": "string" + } + }, + { + "type": "string", + 
"description": "Username for accessing repo.", + "name": "username", + "in": "query" + }, + { + "type": "string", + "description": "Password for accessing repo.", + "name": "password", + "in": "query" + }, + { + "type": "string", + "description": "Private key data for accessing SSH repository.", + "name": "sshPrivateKey", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to skip certificate or host key validation.", + "name": "insecure", + "in": "query" + }, + { + "type": "string", + "description": "TLS client cert data for accessing HTTPS repository.", + "name": "tlsClientCertData", + "in": "query" + }, + { + "type": "string", + "description": "TLS client cert key for accessing HTTPS repository.", + "name": "tlsClientCertKey", + "in": "query" + }, + { + "type": "string", + "description": "The type of the repo.", + "name": "type", + "in": "query" + }, + { + "type": "string", + "description": "The name of the repo.", + "name": "name", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether helm-oci support should be enabled for this repo.", + "name": "enableOci", + "in": "query" + }, + { + "type": "string", + "description": "Github App Private Key PEM data.", + "name": "githubAppPrivateKey", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Github App ID of the app used to access the repo.", + "name": "githubAppID", + "in": "query" + }, + { + "type": "string", + "format": "int64", + "description": "Github App Installation ID of the installed GitHub App.", + "name": "githubAppInstallationID", + "in": "query" + }, + { + "type": "string", + "description": "Github App Enterprise base url if empty will default to https://api.github.com.", + "name": "githubAppEnterpriseBaseUrl", + "in": "query" + }, + { + "type": "string", + "description": "HTTP/HTTPS proxy to access the repository.", + "name": "proxy", + "in": "query" + }, + { + "type": "string", + "description": "Reference between project and 
repository that allow you automatically to be added as item inside SourceRepos project entity.", + "name": "project", + "in": "query" + }, + { + "type": "string", + "description": "Google Cloud Platform service account key.", + "name": "gcpServiceAccountKey", + "in": "query" + }, + { + "type": "boolean", + "description": "Whether to force HTTP basic auth.", + "name": "forceHttpBasicAuth", + "in": "query" + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/repositoryRepoResponse" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + }, + "/api/version": { + "get": { + "tags": [ + "VersionService" + ], + "summary": "Version returns version information of the API server", + "operationId": "VersionService_Version", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/versionVersionMessage" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + } + } + } + }, + "definitions": { + "accountAccount": { + "type": "object", + "properties": { + "capabilities": { + "type": "array", + "items": { + "type": "string" + } + }, + "enabled": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "tokens": { + "type": "array", + "items": { + "$ref": "#/definitions/accountToken" + } + } + } + }, + "accountAccountsList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/accountAccount" + } + } + } + }, + "accountCanIResponse": { + "type": "object", + "properties": { + "value": { + "type": "string" + } + } + }, + "accountCreateTokenRequest": { + "type": "object", + "properties": { + "expiresIn": { + "type": "integer", + "format": "int64", + "title": "expiresIn represents a duration in seconds" + }, + "id": { + "type": "string" 
+ }, + "name": { + "type": "string" + } + } + }, + "accountCreateTokenResponse": { + "type": "object", + "properties": { + "token": { + "type": "string" + } + } + }, + "accountEmptyResponse": { + "type": "object" + }, + "accountToken": { + "type": "object", + "properties": { + "expiresAt": { + "type": "integer", + "format": "int64" + }, + "id": { + "type": "string" + }, + "issuedAt": { + "type": "integer", + "format": "int64" + } + } + }, + "accountUpdatePasswordRequest": { + "type": "object", + "properties": { + "currentPassword": { + "type": "string" + }, + "name": { + "type": "string" + }, + "newPassword": { + "type": "string" + } + } + }, + "accountUpdatePasswordResponse": { + "type": "object" + }, + "applicationApplicationManifestQueryWithFiles": { + "type": "object", + "properties": { + "appNamespace": { + "type": "string" + }, + "checksum": { + "type": "string" + }, + "name": { + "type": "string" + }, + "project": { + "type": "string" + } + } + }, + "applicationApplicationManifestQueryWithFilesWrapper": { + "type": "object", + "properties": { + "chunk": { + "$ref": "#/definitions/applicationFileChunk" + }, + "query": { + "$ref": "#/definitions/applicationApplicationManifestQueryWithFiles" + } + } + }, + "applicationApplicationPatchRequest": { + "type": "object", + "title": "ApplicationPatchRequest is a request to patch an application", + "properties": { + "appNamespace": { + "type": "string" + }, + "name": { + "type": "string" + }, + "patch": { + "type": "string" + }, + "patchType": { + "type": "string" + }, + "project": { + "type": "string" + } + } + }, + "applicationApplicationResourceResponse": { + "type": "object", + "properties": { + "manifest": { + "type": "string" + } + } + }, + "applicationApplicationResponse": { + "type": "object" + }, + "applicationApplicationRollbackRequest": { + "type": "object", + "properties": { + "appNamespace": { + "type": "string" + }, + "dryRun": { + "type": "boolean" + }, + "id": { + "type": "integer", + "format": "int64" 
+ }, + "name": { + "type": "string" + }, + "project": { + "type": "string" + }, + "prune": { + "type": "boolean" + } + } + }, + "applicationApplicationSyncRequest": { + "type": "object", + "title": "ApplicationSyncRequest is a request to apply the config state to live state", + "properties": { + "appNamespace": { + "type": "string" + }, + "dryRun": { + "type": "boolean" + }, + "infos": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Info" + } + }, + "manifests": { + "type": "array", + "items": { + "type": "string" + } + }, + "name": { + "type": "string" + }, + "project": { + "type": "string" + }, + "prune": { + "type": "boolean" + }, + "resources": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1SyncOperationResource" + } + }, + "retryStrategy": { + "$ref": "#/definitions/v1alpha1RetryStrategy" + }, + "revision": { + "type": "string" + }, + "revisions": { + "type": "array", + "items": { + "type": "string" + } + }, + "sourcePositions": { + "type": "array", + "items": { + "type": "string", + "format": "int64" + } + }, + "strategy": { + "$ref": "#/definitions/v1alpha1SyncStrategy" + }, + "syncOptions": { + "$ref": "#/definitions/applicationSyncOptions" + } + } + }, + "applicationApplicationSyncWindow": { + "type": "object", + "properties": { + "duration": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "manualSync": { + "type": "boolean" + }, + "schedule": { + "type": "string" + } + } + }, + "applicationApplicationSyncWindowsResponse": { + "type": "object", + "properties": { + "activeWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/applicationApplicationSyncWindow" + } + }, + "assignedWindows": { + "type": "array", + "items": { + "$ref": "#/definitions/applicationApplicationSyncWindow" + } + }, + "canSync": { + "type": "boolean" + } + } + }, + "applicationFileChunk": { + "type": "object", + "properties": { + "chunk": { + "type": "string", + "format": "byte" + } + } + }, + "applicationLinkInfo": 
{ + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "iconClass": { + "type": "string" + }, + "title": { + "type": "string" + }, + "url": { + "type": "string" + } + } + }, + "applicationLinksResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/applicationLinkInfo" + } + } + } + }, + "applicationLogEntry": { + "type": "object", + "properties": { + "content": { + "type": "string" + }, + "last": { + "type": "boolean" + }, + "podName": { + "type": "string" + }, + "timeStamp": { + "$ref": "#/definitions/v1Time" + }, + "timeStampStr": { + "type": "string" + } + } + }, + "applicationManagedResourcesResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ResourceDiff" + } + } + } + }, + "applicationOperationTerminateResponse": { + "type": "object" + }, + "applicationResourceActionsListResponse": { + "type": "object", + "properties": { + "actions": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ResourceAction" + } + } + } + }, + "applicationSyncOptions": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "applicationsetApplicationSetGenerateRequest": { + "type": "object", + "title": "ApplicationSetGetQuery is a query for applicationset resources", + "properties": { + "applicationSet": { + "$ref": "#/definitions/v1alpha1ApplicationSet" + } + } + }, + "applicationsetApplicationSetGenerateResponse": { + "type": "object", + "title": "ApplicationSetGenerateResponse is a response for applicationset generate request", + "properties": { + "applications": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Application" + } + } + } + }, + "applicationsetApplicationSetResponse": { + "type": "object", + "properties": { + "applicationset": { + "$ref": "#/definitions/v1alpha1ApplicationSet" + }, + "project": { + 
"type": "string" + } + } + }, + "applicationv1alpha1EnvEntry": { + "type": "object", + "title": "EnvEntry represents an entry in the application's environment", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the variable, usually expressed in uppercase" + }, + "value": { + "type": "string", + "title": "Value is the value of the variable" + } + } + }, + "applicationv1alpha1ResourceStatus": { + "type": "object", + "title": "ResourceStatus holds the current sync and health status of a resource\nTODO: describe members of this type", + "properties": { + "group": { + "type": "string" + }, + "health": { + "$ref": "#/definitions/v1alpha1HealthStatus" + }, + "hook": { + "type": "boolean" + }, + "kind": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "requiresDeletionConfirmation": { + "type": "boolean" + }, + "requiresPruning": { + "type": "boolean" + }, + "status": { + "type": "string" + }, + "syncWave": { + "type": "integer", + "format": "int64" + }, + "version": { + "type": "string" + } + } + }, + "clusterClusterID": { + "type": "object", + "title": "ClusterID holds a cluster server URL or cluster name", + "properties": { + "type": { + "type": "string", + "title": "type is the type of the specified cluster identifier ( \"server\" - default, \"name\" )" + }, + "value": { + "type": "string", + "title": "value holds the cluster server URL or cluster name" + } + } + }, + "clusterClusterResponse": { + "type": "object" + }, + "clusterConnector": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "clusterDexConfig": { + "type": "object", + "properties": { + "connectors": { + "type": "array", + "items": { + "$ref": "#/definitions/clusterConnector" + } + } + } + }, + "clusterGoogleAnalyticsConfig": { + "type": "object", + "properties": { + "anonymizeUsers": { + "type": "boolean" + }, + "trackingID": { + "type": "string" 
+ } + } + }, + "clusterHelp": { + "type": "object", + "title": "Help settings", + "properties": { + "binaryUrls": { + "type": "object", + "title": "the URLs for downloading argocd binaries", + "additionalProperties": { + "type": "string" + } + }, + "chatText": { + "type": "string", + "title": "the text for getting chat help, defaults to \"Chat now!\"" + }, + "chatUrl": { + "type": "string", + "title": "the URL for getting chat help, this will typically be your Slack channel for support" + } + } + }, + "clusterOIDCConfig": { + "type": "object", + "properties": { + "cliClientID": { + "type": "string" + }, + "clientID": { + "type": "string" + }, + "enablePKCEAuthentication": { + "type": "boolean" + }, + "idTokenClaims": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/oidcClaim" + } + }, + "issuer": { + "type": "string" + }, + "name": { + "type": "string" + }, + "scopes": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "clusterPlugin": { + "type": "object", + "title": "Plugin settings", + "properties": { + "name": { + "type": "string", + "title": "the name of the plugin, e.g. 
\"kasane\"" + } + } + }, + "clusterSettings": { + "type": "object", + "properties": { + "additionalUrls": { + "type": "array", + "items": { + "type": "string" + } + }, + "appLabelKey": { + "type": "string" + }, + "appsInAnyNamespaceEnabled": { + "type": "boolean" + }, + "configManagementPlugins": { + "description": "Deprecated: use sidecar plugins instead.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ConfigManagementPlugin" + } + }, + "controllerNamespace": { + "type": "string" + }, + "dexConfig": { + "$ref": "#/definitions/clusterDexConfig" + }, + "execEnabled": { + "type": "boolean" + }, + "googleAnalytics": { + "$ref": "#/definitions/clusterGoogleAnalyticsConfig" + }, + "help": { + "$ref": "#/definitions/clusterHelp" + }, + "hydratorEnabled": { + "type": "boolean" + }, + "impersonationEnabled": { + "type": "boolean" + }, + "installationID": { + "type": "string" + }, + "kustomizeOptions": { + "$ref": "#/definitions/v1alpha1KustomizeOptions" + }, + "kustomizeVersions": { + "type": "array", + "items": { + "type": "string" + } + }, + "oidcConfig": { + "$ref": "#/definitions/clusterOIDCConfig" + }, + "passwordPattern": { + "type": "string" + }, + "plugins": { + "type": "array", + "items": { + "$ref": "#/definitions/clusterPlugin" + } + }, + "resourceOverrides": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1alpha1ResourceOverride" + } + }, + "statusBadgeEnabled": { + "type": "boolean" + }, + "statusBadgeRootUrl": { + "type": "string" + }, + "trackingMethod": { + "type": "string" + }, + "uiBannerContent": { + "type": "string" + }, + "uiBannerPermanent": { + "type": "boolean" + }, + "uiBannerPosition": { + "type": "string" + }, + "uiBannerURL": { + "type": "string" + }, + "uiCssURL": { + "type": "string" + }, + "url": { + "type": "string" + }, + "userLoginsDisabled": { + "type": "boolean" + } + } + }, + "clusterSettingsPluginsResponse": { + "type": "object", + "properties": { + "plugins": { + "type": "array", + 
"items": { + "$ref": "#/definitions/clusterPlugin" + } + } + } + }, + "gpgkeyGnuPGPublicKeyCreateResponse": { + "type": "object", + "title": "Response to a public key creation request", + "properties": { + "created": { + "$ref": "#/definitions/v1alpha1GnuPGPublicKeyList" + }, + "skipped": { + "type": "array", + "title": "List of key IDs that haven been skipped because they already exist on the server", + "items": { + "type": "string" + } + } + } + }, + "gpgkeyGnuPGPublicKeyResponse": { + "type": "object", + "title": "Generic (empty) response for GPG public key CRUD requests" + }, + "intstrIntOrString": { + "description": "+protobuf=true\n+protobuf.options.(gogoproto.goproto_stringer)=false\n+k8s:openapi-gen=true", + "type": "object", + "title": "IntOrString is a type that can hold an int32 or a string. When used in\nJSON or YAML marshalling and unmarshalling, it produces or consumes the\ninner type. This allows you to have, for example, a JSON field that can\naccept a name or number.\nTODO: Rename to Int32OrString", + "properties": { + "intVal": { + "type": "integer", + "format": "int32" + }, + "strVal": { + "type": "string" + }, + "type": { + "type": "integer", + "format": "int64" + } + } + }, + "notificationService": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, + "notificationServiceList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/notificationService" + } + } + } + }, + "notificationTemplate": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, + "notificationTemplateList": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/notificationTemplate" + } + } + } + }, + "notificationTrigger": { + "type": "object", + "properties": { + "name": { + "type": "string" + } + } + }, + "notificationTriggerList": { + "type": "object", + "properties": { + "items": { + "type": "array", 
+ "items": { + "$ref": "#/definitions/notificationTrigger" + } + } + } + }, + "oidcClaim": { + "type": "object", + "properties": { + "essential": { + "type": "boolean" + }, + "value": { + "type": "string" + }, + "values": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "projectDetailedProjectsResponse": { + "type": "object", + "properties": { + "clusters": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + "globalProjects": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1AppProject" + } + }, + "project": { + "$ref": "#/definitions/v1alpha1AppProject" + }, + "repositories": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Repository" + } + } + } + }, + "projectEmptyResponse": { + "type": "object" + }, + "projectGlobalProjectsResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1AppProject" + } + } + } + }, + "projectProjectCreateRequest": { + "description": "ProjectCreateRequest defines project creation parameters.", + "type": "object", + "properties": { + "project": { + "$ref": "#/definitions/v1alpha1AppProject" + }, + "upsert": { + "type": "boolean" + } + } + }, + "projectProjectTokenCreateRequest": { + "description": "ProjectTokenCreateRequest defines project token creation parameters.", + "type": "object", + "properties": { + "description": { + "type": "string" + }, + "expiresIn": { + "type": "integer", + "format": "int64", + "title": "expiresIn represents a duration in seconds" + }, + "id": { + "type": "string" + }, + "project": { + "type": "string" + }, + "role": { + "type": "string" + } + } + }, + "projectProjectTokenResponse": { + "description": "ProjectTokenResponse wraps the created token or returns an empty string if deleted.", + "type": "object", + "properties": { + "token": { + "type": "string" + } + } + }, + "projectProjectUpdateRequest": { + "type": "object", + "properties": { + 
"project": { + "$ref": "#/definitions/v1alpha1AppProject" + } + } + }, + "projectSyncWindowsResponse": { + "type": "object", + "properties": { + "windows": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1SyncWindow" + } + } + } + }, + "protobufAny": { + "type": "object", + "properties": { + "type_url": { + "type": "string" + }, + "value": { + "type": "string", + "format": "byte" + } + } + }, + "repocredsRepoCredsResponse": { + "type": "object", + "title": "RepoCredsResponse is a response to most repository credentials requests" + }, + "repositoryAppInfo": { + "type": "object", + "title": "AppInfo contains application type and app file path", + "properties": { + "path": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "repositoryDirectoryAppSpec": { + "type": "object", + "title": "DirectoryAppSpec contains directory" + }, + "repositoryHelmAppSpec": { + "type": "object", + "title": "HelmAppSpec contains helm app name in source repo", + "properties": { + "fileParameters": { + "type": "array", + "title": "helm file parameters", + "items": { + "$ref": "#/definitions/v1alpha1HelmFileParameter" + } + }, + "name": { + "type": "string" + }, + "parameters": { + "type": "array", + "title": "the output of `helm inspect values`", + "items": { + "$ref": "#/definitions/v1alpha1HelmParameter" + } + }, + "valueFiles": { + "type": "array", + "items": { + "type": "string" + } + }, + "values": { + "type": "string", + "title": "the contents of values.yaml" + } + } + }, + "repositoryHelmChart": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "versions": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "repositoryHelmChartsResponse": { + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/repositoryHelmChart" + } + } + } + }, + "repositoryKustomizeAppSpec": { + "type": "object", + "title": "KustomizeAppSpec contains kustomize images", + 
"properties": { + "images": { + "description": "images is a list of available images.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "repositoryManifestResponse": { + "type": "object", + "properties": { + "commands": { + "type": "array", + "title": "Commands is the list of commands used to hydrate the manifests", + "items": { + "type": "string" + } + }, + "manifests": { + "type": "array", + "items": { + "type": "string" + } + }, + "namespace": { + "type": "string" + }, + "revision": { + "type": "string", + "title": "resolved revision" + }, + "server": { + "type": "string" + }, + "sourceType": { + "type": "string" + }, + "verifyResult": { + "type": "string", + "title": "Raw response of git verify-commit operation (always the empty string for Helm)" + } + } + }, + "repositoryParameterAnnouncement": { + "type": "object", + "properties": { + "array": { + "description": "array is the default value of the parameter if the parameter is an array.", + "type": "array", + "items": { + "type": "string" + } + }, + "collectionType": { + "description": "collectionType is the type of value this parameter holds - either a single value (a string) or a collection\n(array or map). If collectionType is set, only the field with that type will be used. If collectionType is not\nset, `string` is the default. If collectionType is set to an invalid value, a validation error is thrown.", + "type": "string" + }, + "itemType": { + "description": "itemType determines the primitive data type represented by the parameter. 
Parameters are always encoded as\nstrings, but this field lets them be interpreted as other primitive types.", + "type": "string" + }, + "map": { + "description": "map is the default value of the parameter if the parameter is a map.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "name": { + "description": "name is the name identifying a parameter.", + "type": "string" + }, + "required": { + "description": "required defines if this given parameter is mandatory.", + "type": "boolean" + }, + "string": { + "description": "string is the default value of the parameter if the parameter is a string.", + "type": "string" + }, + "title": { + "description": "title is a human-readable text of the parameter name.", + "type": "string" + }, + "tooltip": { + "description": "tooltip is a human-readable description of the parameter.", + "type": "string" + } + } + }, + "repositoryPluginAppSpec": { + "type": "object", + "title": "PluginAppSpec contains details about a plugin-type Application", + "properties": { + "parametersAnnouncement": { + "type": "array", + "items": { + "$ref": "#/definitions/repositoryParameterAnnouncement" + } + } + } + }, + "repositoryRefs": { + "type": "object", + "title": "A subset of the repository's named refs", + "properties": { + "branches": { + "type": "array", + "items": { + "type": "string" + } + }, + "tags": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "repositoryRepoAppDetailsQuery": { + "type": "object", + "title": "RepoAppDetailsQuery contains query information for app details request", + "properties": { + "appName": { + "type": "string" + }, + "appProject": { + "type": "string" + }, + "source": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + }, + "sourceIndex": { + "type": "integer", + "format": "int32", + "title": "source index (for multi source apps)" + }, + "versionId": { + "type": "integer", + "format": "int32", + "title": "versionId from historical data (for multi source 
apps)" + } + } + }, + "repositoryRepoAppDetailsResponse": { + "type": "object", + "title": "RepoAppDetailsResponse application details", + "properties": { + "directory": { + "$ref": "#/definitions/repositoryDirectoryAppSpec" + }, + "helm": { + "$ref": "#/definitions/repositoryHelmAppSpec" + }, + "kustomize": { + "$ref": "#/definitions/repositoryKustomizeAppSpec" + }, + "plugin": { + "$ref": "#/definitions/repositoryPluginAppSpec" + }, + "type": { + "type": "string" + } + } + }, + "repositoryRepoAppsResponse": { + "type": "object", + "title": "RepoAppsResponse contains applications of specified repository", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/repositoryAppInfo" + } + } + } + }, + "repositoryRepoResponse": { + "type": "object" + }, + "runtimeError": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + }, + "error": { + "type": "string" + }, + "message": { + "type": "string" + } + } + }, + "runtimeRawExtension": { + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. 
You also need to register your\nvarious plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. 
(TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true", + "type": "object", + "properties": { + "raw": { + "description": "Raw is the underlying serialization of this object.\n\nTODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.", + "type": "string", + "format": "byte" + } + } + }, + "runtimeStreamError": { + "type": "object", + "properties": { + "details": { + "type": "array", + "items": { + "$ref": "#/definitions/protobufAny" + } + }, + "grpc_code": { + "type": "integer", + "format": "int32" + }, + "http_code": { + "type": "integer", + "format": "int32" + }, + "http_status": { + "type": "string" + }, + "message": { + "type": "string" + } + } + }, + "sessionGetUserInfoResponse": { + "type": "object", + "title": "The current user's userInfo info", + "properties": { + "groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "iss": { + "type": "string" + }, + "loggedIn": { + "type": "boolean" + }, + "username": { + "type": "string" + } + } + }, + "sessionSessionCreateRequest": { + "description": "SessionCreateRequest is for logging in.", + "type": "object", + "properties": { + "password": { + "type": "string" + }, + "token": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "sessionSessionResponse": { + "description": "SessionResponse wraps the created token or returns an empty string if deleted.", + "type": "object", + "properties": { + "token": { + "type": "string" + } + } + }, + "v1Event": { + "description": "Event is a report of an event somewhere in the cluster. Events\nhave a limited retention time and triggers and messages may evolve\nwith time. Event consumers should not rely on the timing of an event\nwith a given Reason reflecting a consistent underlying trigger, or the\ncontinued existence of events with that Reason. 
Events should be\ntreated as informative, best-effort, supplemental data.", + "type": "object", + "properties": { + "action": { + "type": "string", + "title": "What action was taken/failed regarding to the Regarding object.\n+optional" + }, + "count": { + "type": "integer", + "format": "int32", + "title": "The number of times this event has occurred.\n+optional" + }, + "eventTime": { + "$ref": "#/definitions/v1MicroTime" + }, + "firstTimestamp": { + "$ref": "#/definitions/v1Time" + }, + "involvedObject": { + "$ref": "#/definitions/v1ObjectReference" + }, + "lastTimestamp": { + "$ref": "#/definitions/v1Time" + }, + "message": { + "type": "string", + "title": "A human-readable description of the status of this operation.\nTODO: decide on maximum length.\n+optional" + }, + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" + }, + "reason": { + "type": "string", + "title": "This should be a short, machine understandable string that gives the reason\nfor the transition into the object's current status.\nTODO: provide exact specification for format.\n+optional" + }, + "related": { + "$ref": "#/definitions/v1ObjectReference" + }, + "reportingComponent": { + "type": "string", + "title": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.\n+optional" + }, + "reportingInstance": { + "type": "string", + "title": "ID of the controller instance, e.g. 
`kubelet-xyzf`.\n+optional" + }, + "series": { + "$ref": "#/definitions/v1EventSeries" + }, + "source": { + "$ref": "#/definitions/v1EventSource" + }, + "type": { + "type": "string", + "title": "Type of this event (Normal, Warning), new types could be added in the future\n+optional" + } + } + }, + "v1EventList": { + "description": "EventList is a list of events.", + "type": "object", + "properties": { + "items": { + "type": "array", + "title": "List of events", + "items": { + "$ref": "#/definitions/v1Event" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1EventSeries": { + "description": "EventSeries contain information on series of events, i.e. thing that was/is happening\ncontinuously for some time.", + "type": "object", + "properties": { + "count": { + "type": "integer", + "format": "int32", + "title": "Number of occurrences in this series up to the last heartbeat time" + }, + "lastObservedTime": { + "$ref": "#/definitions/v1MicroTime" + } + } + }, + "v1EventSource": { + "description": "EventSource contains information for an event.", + "type": "object", + "properties": { + "component": { + "type": "string", + "title": "Component from which the event is generated.\n+optional" + }, + "host": { + "type": "string", + "title": "Node name on which the event is generated.\n+optional" + } + } + }, + "v1FieldsV1": { + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set,\nor a string representing a sub-field or item. 
The string will follow one of these four formats:\n'f:', where is the name of a field in a struct, or key in a map\n'v:', where is the exact json formatted value of a list item\n'i:', where is position of a item in a list\n'k:', where is a map of a list item's key fields to their unique values\nIf a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff\n+protobuf.options.(gogoproto.goproto_stringer)=false", + "type": "object", + "properties": { + "Raw": { + "description": "Raw is the underlying serialization of this object.", + "type": "string", + "format": "byte" + } + } + }, + "v1GroupKind": { + "description": "+protobuf.options.(gogoproto.goproto_stringer)=false", + "type": "object", + "title": "GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying\nconcepts during lookup stages without having partially valid types", + "properties": { + "group": { + "type": "string" + }, + "kind": { + "type": "string" + } + } + }, + "v1JSON": { + "description": "JSON represents any valid JSON value.\nThese types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil.", + "type": "object", + "properties": { + "raw": { + "type": "string", + "format": "byte" + } + } + }, + "v1LabelSelector": { + "type": "object", + "title": "A label selector is a label query over a set of resources. The result of matchLabels and\nmatchExpressions are ANDed. An empty label selector matches all objects. A null\nlabel selector matches no objects.\n+structType=atomic", + "properties": { + "matchExpressions": { + "type": "array", + "title": "matchExpressions is a list of label selector requirements. The requirements are ANDed.\n+optional\n+listType=atomic", + "items": { + "$ref": "#/definitions/v1LabelSelectorRequirement" + } + }, + "matchLabels": { + "type": "object", + "title": "matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels\nmap is equivalent to an element of matchExpressions, whose key field is \"key\", the\noperator is \"In\", and the values array contains only \"value\". The requirements are ANDed.\n+optional", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1LabelSelectorRequirement": { + "description": "A label selector requirement is a selector that contains values, a key, and an operator that\nrelates the key and values.", + "type": "object", + "properties": { + "key": { + "description": "key is the label key that the selector applies to.", + "type": "string" + }, + "operator": { + "description": "operator represents a key's relationship to a set of values.\nValid operators are In, NotIn, Exists and DoesNotExist.", + "type": "string" + }, + "values": { + "type": "array", + "title": "values is an array of string values. If the operator is In or NotIn,\nthe values array must be non-empty. If the operator is Exists or DoesNotExist,\nthe values array must be empty. This array is replaced during a strategic\nmerge patch.\n+optional\n+listType=atomic", + "items": { + "type": "string" + } + } + } + }, + "v1ListMeta": { + "description": "ListMeta describes metadata that synthetic resources must have, including lists and\nvarious status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "type": "object", + "properties": { + "continue": { + "description": "continue may be set if the user set a limit on the number of items returned, and indicates that\nthe server has more data available. The value is opaque and may be used to issue another request\nto the endpoint that served this list to retrieve the next set of available objects. Continuing a\nconsistent list may not be possible if the server configuration has changed or more than a few\nminutes have passed. 
The resourceVersion field returned when using this continue value will be\nidentical to the value in the first response, unless you have received this token from an error\nmessage.", + "type": "string" + }, + "remainingItemCount": { + "type": "integer", + "format": "int64", + "title": "remainingItemCount is the number of subsequent items in the list which are not included in this\nlist response. If the list request contained label or field selectors, then the number of\nremaining items is unknown and the field will be left unset and omitted during serialization.\nIf the list is complete (either because it is not chunking or because this is the last chunk),\nthen there are no more remaining items and this field will be left unset and omitted during\nserialization.\nServers older than v1.15 do not set this field.\nThe intended use of the remainingItemCount is *estimating* the size of a collection. Clients\nshould not rely on the remainingItemCount to be set or to be exact.\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "String that identifies the server's internal version of this object that\ncan be used by clients to determine when objects have changed.\nValue must be treated as opaque by clients and passed unmodified back to the server.\nPopulated by the system.\nRead-only.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "selfLink": { + "type": "string", + "title": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\n+optional" + } + } + }, + "v1LoadBalancerIngress": { + "description": "LoadBalancerIngress represents the status of a load-balancer ingress point:\ntraffic intended for the service should be sent to an ingress point.", + "type": "object", + "properties": { + "hostname": { + "type": "string", + "title": "Hostname is set for load-balancer ingress points that are DNS based\n(typically AWS 
load-balancers)\n+optional" + }, + "ip": { + "type": "string", + "title": "IP is set for load-balancer ingress points that are IP based\n(typically GCE or OpenStack load-balancers)\n+optional" + }, + "ipMode": { + "type": "string", + "title": "IPMode specifies how the load-balancer IP behaves, and may only be specified when the ip field is specified.\nSetting this to \"VIP\" indicates that traffic is delivered to the node with\nthe destination set to the load-balancer's IP and port.\nSetting this to \"Proxy\" indicates that traffic is delivered to the node or pod with\nthe destination set to the node's IP and node port or the pod's IP and port.\nService implementations may use this information to adjust traffic routing.\n+optional" + }, + "ports": { + "type": "array", + "title": "Ports is a list of records of service ports\nIf used, every port defined in the service should have an entry in it\n+listType=atomic\n+optional", + "items": { + "$ref": "#/definitions/v1PortStatus" + } + } + } + }, + "v1ManagedFieldsEntry": { + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource\nthat the fieldset applies to.", + "type": "object", + "properties": { + "apiVersion": { + "description": "APIVersion defines the version of this resource that this field set\napplies to. The format is \"group/version\" just like the top-level\nAPIVersion field. 
It is necessary to track the version of a field\nset because it cannot be automatically converted.", + "type": "string" + }, + "fieldsType": { + "type": "string", + "title": "FieldsType is the discriminator for the different fields format and version.\nThere is currently only one possible value: \"FieldsV1\"" + }, + "fieldsV1": { + "$ref": "#/definitions/v1FieldsV1" + }, + "manager": { + "description": "Manager is an identifier of the workflow managing these fields.", + "type": "string" + }, + "operation": { + "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created.\nThe only valid values for this field are 'Apply' and 'Update'.", + "type": "string" + }, + "subresource": { + "description": "Subresource is the name of the subresource used to update that object, or\nempty string if the object was updated through the main resource. The\nvalue of this field is used to distinguish between managers, even if they\nshare the same name. For example, a status update will be distinct from a\nregular update using the same manager name.\nNote that the APIVersion field is not related to the Subresource field and\nit always corresponds to the version of the main resource.", + "type": "string" + }, + "time": { + "$ref": "#/definitions/v1Time" + } + } + }, + "v1MicroTime": { + "description": "MicroTime is version of Time with microsecond level precision.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false", + "type": "object", + "properties": { + "nanos": { + "description": "Non-negative fractions of a second at nanosecond resolution. Negative\nsecond values with fractions must still have non-negative nanos values\nthat count forward in time. Must be from 0 to 999,999,999\ninclusive. 
This field may be limited in precision depending on context.", + "type": "integer", + "format": "int32" + }, + "seconds": { + "description": "Represents seconds of UTC time since Unix epoch\n1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to\n9999-12-31T23:59:59Z inclusive.", + "type": "integer", + "format": "int64" + } + } + }, + "v1NodeSystemInfo": { + "description": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + "type": "object", + "properties": { + "architecture": { + "type": "string", + "title": "The Architecture reported by the node" + }, + "bootID": { + "description": "Boot ID reported by the node.", + "type": "string" + }, + "containerRuntimeVersion": { + "description": "ContainerRuntime Version reported by the node through runtime remote API (e.g. containerd://1.4.2).", + "type": "string" + }, + "kernelVersion": { + "description": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", + "type": "string" + }, + "kubeProxyVersion": { + "description": "Deprecated: KubeProxy Version reported by the node.", + "type": "string" + }, + "kubeletVersion": { + "description": "Kubelet Version reported by the node.", + "type": "string" + }, + "machineID": { + "type": "string", + "title": "MachineID reported by the node. For unique machine identification\nin the cluster this field is preferred. Learn more from man(5)\nmachine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html" + }, + "operatingSystem": { + "type": "string", + "title": "The Operating System reported by the node" + }, + "osImage": { + "description": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + "type": "string" + }, + "systemUUID": { + "type": "string", + "title": "SystemUUID reported by the node. For unique machine identification\nMachineID is preferred. 
This field is specific to Red Hat hosts\nhttps://access.redhat.com/documentation/en-us/red_hat_subscription_management/1/html/rhsm/uuid" + } + } + }, + "v1ObjectMeta": { + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects\nusers must create.", + "type": "object", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations is an unstructured key value map stored with a resource that may be\nset by external tools to store and retrieve arbitrary metadata. They are not\nqueryable and should be preserved when modifying objects.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations\n+optional", + "additionalProperties": { + "type": "string" + } + }, + "creationTimestamp": { + "$ref": "#/definitions/v1Time" + }, + "deletionGracePeriodSeconds": { + "type": "integer", + "format": "int64", + "title": "Number of seconds allowed for this object to gracefully terminate before\nit will be removed from the system. Only set when deletionTimestamp is also set.\nMay only be shortened.\nRead-only.\n+optional" + }, + "deletionTimestamp": { + "$ref": "#/definitions/v1Time" + }, + "finalizers": { + "type": "array", + "title": "Must be empty before the object is deleted from the registry. Each entry\nis an identifier for the responsible component that will remove the entry\nfrom the list. If the deletionTimestamp of the object is non-nil, entries\nin this list can only be removed.\nFinalizers may be processed and removed in any order. 
Order is NOT enforced\nbecause it introduces significant risk of stuck finalizers.\nfinalizers is a shared field, any actor with permission can reorder it.\nIf the finalizer list is processed in order, then this can lead to a situation\nin which the component responsible for the first finalizer in the list is\nwaiting for a signal (field value, external system, or other) produced by a\ncomponent responsible for a finalizer later in the list, resulting in a deadlock.\nWithout enforced ordering finalizers are free to order amongst themselves and\nare not vulnerable to ordering changes in the list.\n+optional\n+patchStrategy=merge\n+listType=set", + "items": { + "type": "string" + } + }, + "generateName": { + "description": "GenerateName is an optional prefix, used by the server, to generate a unique\nname ONLY IF the Name field has not been provided.\nIf this field is used, the name returned to the client will be different\nthan the name passed. This value will also be combined with a unique suffix.\nThe provided value has the same validation rules as the Name field,\nand may be truncated by the length of the suffix required to make the value\nunique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency\n+optional", + "type": "string" + }, + "generation": { + "type": "integer", + "format": "int64", + "title": "A sequence number representing a specific generation of the desired state.\nPopulated by the system. Read-only.\n+optional" + }, + "labels": { + "type": "object", + "title": "Map of string keys and values that can be used to organize and categorize\n(scope and select) objects. 
May match selectors of replication controllers\nand services.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels\n+optional", + "additionalProperties": { + "type": "string" + } + }, + "managedFields": { + "description": "ManagedFields maps workflow-id and version to the set of fields\nthat are managed by that workflow. This is mostly for internal\nhousekeeping, and users typically shouldn't need to set or\nunderstand this field. A workflow can be the user's name, a\ncontroller's name, or the name of a specific apply path like\n\"ci-cd\". The set of fields is always in the version that the\nworkflow used when modifying the object.\n\n+optional\n+listType=atomic", + "type": "array", + "items": { + "$ref": "#/definitions/v1ManagedFieldsEntry" + } + }, + "name": { + "type": "string", + "title": "Name must be unique within a namespace. Is required when creating resources, although\nsome resources may allow a client to request the generation of an appropriate name\nautomatically. Name is primarily intended for creation idempotence and configuration\ndefinition.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names\n+optional" + }, + "namespace": { + "description": "Namespace defines the space within which each name must be unique. An empty namespace is\nequivalent to the \"default\" namespace, but \"default\" is the canonical representation.\nNot all objects are required to be scoped to a namespace - the value of this field for\nthose objects will be empty.\n\nMust be a DNS_LABEL.\nCannot be updated.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces\n+optional", + "type": "string" + }, + "ownerReferences": { + "type": "array", + "title": "List of objects depended by this object. If ALL objects in the list have\nbeen deleted, this object will be garbage collected. 
If this object is managed by a controller,\nthen an entry in this list will point to this controller, with the controller field set to true.\nThere cannot be more than one managing controller.\n+optional\n+patchMergeKey=uid\n+patchStrategy=merge\n+listType=map\n+listMapKey=uid", + "items": { + "$ref": "#/definitions/v1OwnerReference" + } + }, + "resourceVersion": { + "description": "An opaque value that represents the internal version of this object that can\nbe used by clients to determine when objects have changed. May be used for optimistic\nconcurrency, change detection, and the watch operation on a resource or set of resources.\nClients must treat these values as opaque and passed unmodified back to the server.\nThey may only be valid for a particular resource or set of resources.\n\nPopulated by the system.\nRead-only.\nValue must be treated as opaque by clients and .\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional", + "type": "string" + }, + "selfLink": { + "type": "string", + "title": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.\n+optional" + }, + "uid": { + "description": "UID is the unique in time and space value for this object. It is typically generated by\nthe server on successful creation of a resource and is not allowed to change on PUT\noperations.\n\nPopulated by the system.\nRead-only.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids\n+optional", + "type": "string" + } + } + }, + "v1ObjectReference": { + "description": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. 
For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic", + "type": "object", + "properties": { + "apiVersion": { + "type": "string", + "title": "API version of the referent.\n+optional" + }, + "fieldPath": { + "type": "string", + "title": "If referring to a piece of an object instead of an entire object, this string\nshould contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2].\nFor example, if the object reference is to a container within a pod, this would take on a value like:\n\"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered\nthe event) or if no container name is 
specified \"spec.containers[2]\" (container with\nindex 2 in this pod). This syntax is chosen only to have some well-defined way of\nreferencing a part of an object.\nTODO: this design is not final and this field is subject to change in the future.\n+optional" + }, + "kind": { + "type": "string", + "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n+optional" + }, + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names\n+optional" + }, + "namespace": { + "type": "string", + "title": "Namespace of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/\n+optional" + }, + "resourceVersion": { + "type": "string", + "title": "Specific resourceVersion to which this reference is made, if any.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency\n+optional" + }, + "uid": { + "type": "string", + "title": "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids\n+optional" + } + } + }, + "v1OwnerReference": { + "type": "object", + "title": "OwnerReference contains enough information to let you identify an owning\nobject. 
An owning object must be in the same namespace as the dependent, or\nbe cluster-scoped, so there is no namespace field.\n+structType=atomic", + "properties": { + "apiVersion": { + "description": "API version of the referent.", + "type": "string" + }, + "blockOwnerDeletion": { + "type": "boolean", + "title": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then\nthe owner cannot be deleted from the key-value store until this\nreference is removed.\nSee https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion\nfor how the garbage collector interacts with this field and enforces the foreground deletion.\nDefaults to false.\nTo set this field, a user needs \"delete\" permission of the owner,\notherwise 422 (Unprocessable Entity) will be returned.\n+optional" + }, + "controller": { + "type": "boolean", + "title": "If true, this reference points to the managing controller.\n+optional" + }, + "kind": { + "type": "string", + "title": "Kind of the referent.\nMore info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + }, + "name": { + "type": "string", + "title": "Name of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names" + }, + "uid": { + "type": "string", + "title": "UID of the referent.\nMore info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids" + } + } + }, + "v1PortStatus": { + "type": "object", + "properties": { + "error": { + "type": "string", + "title": "Error is to record the problem with the service port\nThe format of the error shall comply with the following rules:\n- built-in error values shall be specified in this file and those shall use\n CamelCase names\n- cloud provider specific error values must have names that comply with the\n format foo.example.com/CamelCase.\n---\nThe regex it matches is 
(dns1123SubdomainFmt/)?(qualifiedNameFmt)\n+optional\n+kubebuilder:validation:Required\n+kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`\n+kubebuilder:validation:MaxLength=316" + }, + "port": { + "type": "integer", + "format": "int32", + "title": "Port is the port number of the service port of which status is recorded here" + }, + "protocol": { + "type": "string", + "title": "Protocol is the protocol of the service port of which status is recorded here\nThe supported values are: \"TCP\", \"UDP\", \"SCTP\"" + } + } + }, + "v1Time": { + "description": "Time is a wrapper around time.Time which supports correct\nmarshaling to YAML and JSON. Wrappers are provided for many\nof the factory methods that the time package offers.\n\n+protobuf.options.marshal=false\n+protobuf.as=Timestamp\n+protobuf.options.(gogoproto.goproto_stringer)=false", + "type": "string", + "format": "date-time" + }, + "v1alpha1AWSAuthConfig": { + "type": "object", + "title": "AWSAuthConfig is an AWS IAM authentication configuration", + "properties": { + "clusterName": { + "type": "string", + "title": "ClusterName contains AWS cluster name" + }, + "profile": { + "description": "Profile contains optional role ARN. If set then AWS IAM Authenticator uses the profile to perform cluster operations instead of the default AWS credential provider chain.", + "type": "string" + }, + "roleARN": { + "description": "RoleARN contains optional role ARN. 
If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain.", + "type": "string" + } + } + }, + "v1alpha1AppProject": { + "type": "object", + "title": "AppProject provides a logical grouping of applications, providing controls for:\n* where the apps may deploy to (cluster whitelist)\n* what may be deployed (repository whitelist, resource whitelist/blacklist)\n* who can access these applications (roles, OIDC group claims bindings)\n* and what they can do (RBAC policies)\n* automation access to these roles (JWT tokens)\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=appprojects,shortName=appproj;appprojs", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/v1alpha1AppProjectSpec" + }, + "status": { + "$ref": "#/definitions/v1alpha1AppProjectStatus" + } + } + }, + "v1alpha1AppProjectList": { + "type": "object", + "title": "AppProjectList is list of AppProject resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1AppProject" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1AppProjectSpec": { + "type": "object", + "title": "AppProjectSpec is the specification of an AppProject", + "properties": { + "clusterResourceBlacklist": { + "type": "array", + "title": "ClusterResourceBlacklist contains list of blacklisted cluster level resources", + "items": { + "$ref": "#/definitions/v1GroupKind" + } + }, + "clusterResourceWhitelist": { + "type": "array", + "title": "ClusterResourceWhitelist contains list of whitelisted cluster level resources", + "items": { + "$ref": "#/definitions/v1GroupKind" + } + }, + "description": { + "type": "string", + "title": "Description contains optional project description" + }, 
+ "destinationServiceAccounts": { + "description": "DestinationServiceAccounts holds information about the service accounts to be impersonated for the application sync operation for each destination.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationDestinationServiceAccount" + } + }, + "destinations": { + "type": "array", + "title": "Destinations contains list of destinations available for deployment", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationDestination" + } + }, + "namespaceResourceBlacklist": { + "type": "array", + "title": "NamespaceResourceBlacklist contains list of blacklisted namespace level resources", + "items": { + "$ref": "#/definitions/v1GroupKind" + } + }, + "namespaceResourceWhitelist": { + "type": "array", + "title": "NamespaceResourceWhitelist contains list of whitelisted namespace level resources", + "items": { + "$ref": "#/definitions/v1GroupKind" + } + }, + "orphanedResources": { + "$ref": "#/definitions/v1alpha1OrphanedResourcesMonitorSettings" + }, + "permitOnlyProjectScopedClusters": { + "type": "boolean", + "title": "PermitOnlyProjectScopedClusters determines whether destinations can only reference clusters which are project-scoped" + }, + "roles": { + "type": "array", + "title": "Roles are user defined RBAC roles associated with this project", + "items": { + "$ref": "#/definitions/v1alpha1ProjectRole" + } + }, + "signatureKeys": { + "type": "array", + "title": "SignatureKeys contains a list of PGP key IDs that commits in Git must be signed with in order to be allowed for sync", + "items": { + "$ref": "#/definitions/v1alpha1SignatureKey" + } + }, + "sourceNamespaces": { + "type": "array", + "title": "SourceNamespaces defines the namespaces application resources are allowed to be created in", + "items": { + "type": "string" + } + }, + "sourceRepos": { + "type": "array", + "title": "SourceRepos contains list of repository URLs which can be used for deployment", + "items": { + "type": "string" + } + 
}, + "syncWindows": { + "type": "array", + "title": "SyncWindows controls when syncs can be run for apps in this project", + "items": { + "$ref": "#/definitions/v1alpha1SyncWindow" + } + } + } + }, + "v1alpha1AppProjectStatus": { + "type": "object", + "title": "AppProjectStatus contains status information for AppProject CRs", + "properties": { + "jwtTokensByRole": { + "type": "object", + "title": "JWTTokensByRole contains a list of JWT tokens issued for a given role", + "additionalProperties": { + "$ref": "#/definitions/v1alpha1JWTTokens" + } + } + } + }, + "v1alpha1Application": { + "type": "object", + "title": "Application is a definition of Application resource.\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applications,shortName=app;apps\n+kubebuilder:printcolumn:name=\"Sync Status\",type=string,JSONPath=`.status.sync.status`\n+kubebuilder:printcolumn:name=\"Health Status\",type=string,JSONPath=`.status.health.status`\n+kubebuilder:printcolumn:name=\"Revision\",type=string,JSONPath=`.status.sync.revision`,priority=10\n+kubebuilder:printcolumn:name=\"Project\",type=string,JSONPath=`.spec.project`,priority=10", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" + }, + "operation": { + "$ref": "#/definitions/v1alpha1Operation" + }, + "spec": { + "$ref": "#/definitions/v1alpha1ApplicationSpec" + }, + "status": { + "$ref": "#/definitions/v1alpha1ApplicationStatus" + } + } + }, + "v1alpha1ApplicationCondition": { + "type": "object", + "title": "ApplicationCondition contains details about an application condition, which is usually an error or warning", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/v1Time" + }, + "message": { + "type": "string", + "title": "Message contains human-readable message indicating details about condition" + }, + "type": { + "type": "string", + "title": "Type is an application condition type" + } + } + }, + 
"v1alpha1ApplicationDestination": { + "type": "object", + "title": "ApplicationDestination holds information about the application's destination", + "properties": { + "name": { + "description": "Name is an alternate way of specifying the target cluster by its symbolic name. This must be set if Server is not set.", + "type": "string" + }, + "namespace": { + "type": "string", + "title": "Namespace specifies the target namespace for the application's resources.\nThe namespace will only be set for namespace-scoped resources that have not set a value for .metadata.namespace" + }, + "server": { + "description": "Server specifies the URL of the target cluster's Kubernetes control plane API. This must be set if Name is not set.", + "type": "string" + } + } + }, + "v1alpha1ApplicationDestinationServiceAccount": { + "description": "ApplicationDestinationServiceAccount holds information about the service account to be impersonated for the application sync operation.", + "type": "object", + "properties": { + "defaultServiceAccount": { + "type": "string", + "title": "DefaultServiceAccount to be used for impersonation during the sync operation" + }, + "namespace": { + "description": "Namespace specifies the target namespace for the application's resources.", + "type": "string" + }, + "server": { + "description": "Server specifies the URL of the target cluster's Kubernetes control plane API.", + "type": "string" + } + } + }, + "v1alpha1ApplicationList": { + "type": "object", + "title": "ApplicationList is list of Application resources\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Application" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1ApplicationMatchExpression": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "operator": { + "type": "string" + }, + "values": { + "type": "array", + "items": 
{ + "type": "string" + } + } + } + }, + "v1alpha1ApplicationPreservedFields": { + "type": "object", + "properties": { + "annotations": { + "type": "array", + "items": { + "type": "string" + } + }, + "labels": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1ApplicationSet": { + "type": "object", + "title": "ApplicationSet is a set of Application resources\n+genclient\n+genclient:noStatus\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:resource:path=applicationsets,shortName=appset;appsets\n+kubebuilder:subresource:status", + "properties": { + "metadata": { + "$ref": "#/definitions/v1ObjectMeta" + }, + "spec": { + "$ref": "#/definitions/v1alpha1ApplicationSetSpec" + }, + "status": { + "$ref": "#/definitions/v1alpha1ApplicationSetStatus" + } + } + }, + "v1alpha1ApplicationSetApplicationStatus": { + "type": "object", + "title": "ApplicationSetApplicationStatus contains details about each Application managed by the ApplicationSet", + "properties": { + "application": { + "type": "string", + "title": "Application contains the name of the Application resource" + }, + "lastTransitionTime": { + "$ref": "#/definitions/v1Time" + }, + "message": { + "type": "string", + "title": "Message contains human-readable message indicating details about the status" + }, + "status": { + "type": "string", + "title": "Status contains the AppSet's perceived status of the managed Application resource: (Waiting, Pending, Progressing, Healthy)" + }, + "step": { + "type": "string", + "title": "Step tracks which step this Application should be updated in" + }, + "targetrevisions": { + "description": "TargetRevision tracks the desired revisions the Application should be synced to.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1ApplicationSetCondition": { + "type": "object", + "title": "ApplicationSetCondition contains details about an applicationset condition, which is usually an error or warning", 
+ "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/v1Time" + }, + "message": { + "type": "string", + "title": "Message contains human-readable message indicating details about condition" + }, + "reason": { + "type": "string", + "title": "Single word camelcase representing the reason for the status eg ErrorOccurred" + }, + "status": { + "type": "string", + "title": "True/False/Unknown" + }, + "type": { + "type": "string", + "title": "Type is an applicationset condition type" + } + } + }, + "v1alpha1ApplicationSetGenerator": { + "description": "ApplicationSetGenerator represents a generator at the top level of an ApplicationSet.", + "type": "object", + "properties": { + "clusterDecisionResource": { + "$ref": "#/definitions/v1alpha1DuckTypeGenerator" + }, + "clusters": { + "$ref": "#/definitions/v1alpha1ClusterGenerator" + }, + "git": { + "$ref": "#/definitions/v1alpha1GitGenerator" + }, + "list": { + "$ref": "#/definitions/v1alpha1ListGenerator" + }, + "matrix": { + "$ref": "#/definitions/v1alpha1MatrixGenerator" + }, + "merge": { + "$ref": "#/definitions/v1alpha1MergeGenerator" + }, + "plugin": { + "$ref": "#/definitions/v1alpha1PluginGenerator" + }, + "pullRequest": { + "$ref": "#/definitions/v1alpha1PullRequestGenerator" + }, + "scmProvider": { + "$ref": "#/definitions/v1alpha1SCMProviderGenerator" + }, + "selector": { + "$ref": "#/definitions/v1LabelSelector" + } + } + }, + "v1alpha1ApplicationSetList": { + "type": "object", + "title": "ApplicationSetList contains a list of ApplicationSet\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+kubebuilder:object:root=true", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSet" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1ApplicationSetNestedGenerator": { + "description": "ApplicationSetNestedGenerator represents a generator nested within a combination-type generator (MatrixGenerator 
or\nMergeGenerator).", + "type": "object", + "properties": { + "clusterDecisionResource": { + "$ref": "#/definitions/v1alpha1DuckTypeGenerator" + }, + "clusters": { + "$ref": "#/definitions/v1alpha1ClusterGenerator" + }, + "git": { + "$ref": "#/definitions/v1alpha1GitGenerator" + }, + "list": { + "$ref": "#/definitions/v1alpha1ListGenerator" + }, + "matrix": { + "$ref": "#/definitions/v1JSON" + }, + "merge": { + "$ref": "#/definitions/v1JSON" + }, + "plugin": { + "$ref": "#/definitions/v1alpha1PluginGenerator" + }, + "pullRequest": { + "$ref": "#/definitions/v1alpha1PullRequestGenerator" + }, + "scmProvider": { + "$ref": "#/definitions/v1alpha1SCMProviderGenerator" + }, + "selector": { + "$ref": "#/definitions/v1LabelSelector" + } + } + }, + "v1alpha1ApplicationSetResourceIgnoreDifferences": { + "description": "ApplicationSetResourceIgnoreDifferences configures how the ApplicationSet controller will ignore differences in live\napplications when applying changes from generated applications.", + "type": "object", + "properties": { + "jqPathExpressions": { + "description": "JQPathExpressions is a list of JQ path expressions to fields to ignore differences for.", + "type": "array", + "items": { + "type": "string" + } + }, + "jsonPointers": { + "description": "JSONPointers is a list of JSON pointers to fields to ignore differences for.", + "type": "array", + "items": { + "type": "string" + } + }, + "name": { + "description": "Name is the name of the application to ignore differences for. 
If not specified, the rule applies to all applications.", + "type": "string" + } + } + }, + "v1alpha1ApplicationSetRolloutStep": { + "type": "object", + "properties": { + "matchExpressions": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationMatchExpression" + } + }, + "maxUpdate": { + "$ref": "#/definitions/intstrIntOrString" + } + } + }, + "v1alpha1ApplicationSetRolloutStrategy": { + "type": "object", + "properties": { + "steps": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetRolloutStep" + } + } + } + }, + "v1alpha1ApplicationSetSpec": { + "description": "ApplicationSetSpec represents a class of application set state.", + "type": "object", + "properties": { + "applyNestedSelectors": { + "type": "boolean", + "title": "ApplyNestedSelectors enables selectors defined within the generators of two level-nested matrix or merge generators" + }, + "generators": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetGenerator" + } + }, + "goTemplate": { + "type": "boolean" + }, + "goTemplateOptions": { + "type": "array", + "items": { + "type": "string" + } + }, + "ignoreApplicationDifferences": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetResourceIgnoreDifferences" + } + }, + "preservedFields": { + "$ref": "#/definitions/v1alpha1ApplicationPreservedFields" + }, + "strategy": { + "$ref": "#/definitions/v1alpha1ApplicationSetStrategy" + }, + "syncPolicy": { + "$ref": "#/definitions/v1alpha1ApplicationSetSyncPolicy" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "templatePatch": { + "type": "string" + } + } + }, + "v1alpha1ApplicationSetStatus": { + "type": "object", + "title": "ApplicationSetStatus defines the observed state of ApplicationSet", + "properties": { + "applicationStatus": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetApplicationStatus" + } + }, + "conditions": { + 
"type": "array", + "title": "INSERT ADDITIONAL STATUS FIELD - define observed state of cluster\nImportant: Run \"make\" to regenerate code after modifying this file", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetCondition" + } + }, + "resources": { + "description": "Resources is a list of Applications resources managed by this application set.", + "type": "array", + "items": { + "$ref": "#/definitions/applicationv1alpha1ResourceStatus" + } + } + } + }, + "v1alpha1ApplicationSetStrategy": { + "description": "ApplicationSetStrategy configures how generated Applications are updated in sequence.", + "type": "object", + "properties": { + "rollingSync": { + "$ref": "#/definitions/v1alpha1ApplicationSetRolloutStrategy" + }, + "type": { + "type": "string" + } + } + }, + "v1alpha1ApplicationSetSyncPolicy": { + "description": "ApplicationSetSyncPolicy configures how generated Applications will relate to their\nApplicationSet.", + "type": "object", + "properties": { + "applicationsSync": { + "type": "string", + "title": "ApplicationsSync represents the policy applied on the generated applications. Possible values are create-only, create-update, create-delete, sync\n+kubebuilder:validation:Optional\n+kubebuilder:validation:Enum=create-only;create-update;create-delete;sync" + }, + "preserveResourcesOnDeletion": { + "description": "PreserveResourcesOnDeletion will preserve resources on deletion. 
If PreserveResourcesOnDeletion is set to true, these Applications will not be deleted.", + "type": "boolean" + } + } + }, + "v1alpha1ApplicationSetTemplate": { + "type": "object", + "title": "ApplicationSetTemplate represents argocd ApplicationSpec", + "properties": { + "metadata": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplateMeta" + }, + "spec": { + "$ref": "#/definitions/v1alpha1ApplicationSpec" + } + } + }, + "v1alpha1ApplicationSetTemplateMeta": { + "type": "object", + "title": "ApplicationSetTemplateMeta represents the Argo CD application fields that may\nbe used for Applications generated from the ApplicationSet (based on metav1.ObjectMeta)", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "finalizers": { + "type": "array", + "items": { + "type": "string" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + } + }, + "v1alpha1ApplicationSetTree": { + "type": "object", + "title": "ApplicationSetTree holds nodes which belongs to the application\nUsed to build a tree of an ApplicationSet and its children", + "properties": { + "nodes": { + "type": "array", + "title": "Nodes contains list of nodes which are directly managed by the applicationset", + "items": { + "$ref": "#/definitions/v1alpha1ResourceNode" + } + } + } + }, + "v1alpha1ApplicationSource": { + "type": "object", + "title": "ApplicationSource contains all required information about the source of an application", + "properties": { + "chart": { + "description": "Chart is a Helm chart name, and must be specified for applications sourced from a Helm repo.", + "type": "string" + }, + "directory": { + "$ref": "#/definitions/v1alpha1ApplicationSourceDirectory" + }, + "helm": { + "$ref": "#/definitions/v1alpha1ApplicationSourceHelm" + }, + "kustomize": { + "$ref": 
"#/definitions/v1alpha1ApplicationSourceKustomize" + }, + "name": { + "description": "Name is used to refer to a source and is displayed in the UI. It is used in multi-source Applications.", + "type": "string" + }, + "path": { + "description": "Path is a directory path within the Git repository, and is only valid for applications sourced from Git.", + "type": "string" + }, + "plugin": { + "$ref": "#/definitions/v1alpha1ApplicationSourcePlugin" + }, + "ref": { + "description": "Ref is reference to another source within sources field. This field will not be used if used with a `source` tag.", + "type": "string" + }, + "repoURL": { + "type": "string", + "title": "RepoURL is the URL to the repository (Git or Helm) that contains the application manifests" + }, + "targetRevision": { + "description": "TargetRevision defines the revision of the source to sync the application to.\nIn case of Git, this can be commit, tag, or branch. If omitted, will equal to HEAD.\nIn case of Helm, this is a semver tag for the Chart's version.", + "type": "string" + } + } + }, + "v1alpha1ApplicationSourceDirectory": { + "type": "object", + "title": "ApplicationSourceDirectory holds options for applications of type plain YAML or Jsonnet", + "properties": { + "exclude": { + "type": "string", + "title": "Exclude contains a glob pattern to match paths against that should be explicitly excluded from being used during manifest generation" + }, + "include": { + "type": "string", + "title": "Include contains a glob pattern to match paths against that should be explicitly included during manifest generation" + }, + "jsonnet": { + "$ref": "#/definitions/v1alpha1ApplicationSourceJsonnet" + }, + "recurse": { + "type": "boolean", + "title": "Recurse specifies whether to scan a directory recursively for manifests" + } + } + }, + "v1alpha1ApplicationSourceHelm": { + "type": "object", + "title": "ApplicationSourceHelm holds helm specific options", + "properties": { + "apiVersions": { + "description": 
"APIVersions specifies the Kubernetes resource API versions to pass to Helm when templating manifests. By default,\nArgo CD uses the API versions of the target cluster. The format is [group/]version/kind.", + "type": "array", + "items": { + "type": "string" + } + }, + "fileParameters": { + "type": "array", + "title": "FileParameters are file parameters to the helm template", + "items": { + "$ref": "#/definitions/v1alpha1HelmFileParameter" + } + }, + "ignoreMissingValueFiles": { + "type": "boolean", + "title": "IgnoreMissingValueFiles prevents helm template from failing when valueFiles do not exist locally by not appending them to helm template --values" + }, + "kubeVersion": { + "description": "KubeVersion specifies the Kubernetes API version to pass to Helm when templating manifests. By default, Argo CD\nuses the Kubernetes version of the target cluster.", + "type": "string" + }, + "namespace": { + "description": "Namespace is an optional namespace to template with. If left empty, defaults to the app's destination namespace.", + "type": "string" + }, + "parameters": { + "type": "array", + "title": "Parameters is a list of Helm parameters which are passed to the helm template command upon manifest generation", + "items": { + "$ref": "#/definitions/v1alpha1HelmParameter" + } + }, + "passCredentials": { + "type": "boolean", + "title": "PassCredentials pass credentials to all domains (Helm's --pass-credentials)" + }, + "releaseName": { + "type": "string", + "title": "ReleaseName is the Helm release name to use. 
If omitted it will use the application name" + }, + "skipCrds": { + "type": "boolean", + "title": "SkipCrds skips custom resource definition installation step (Helm's --skip-crds)" + }, + "skipSchemaValidation": { + "type": "boolean", + "title": "SkipSchemaValidation skips JSON schema validation (Helm's --skip-schema-validation)" + }, + "skipTests": { + "description": "SkipTests skips test manifest installation step (Helm's --skip-tests).", + "type": "boolean" + }, + "valueFiles": { + "type": "array", + "title": "ValuesFiles is a list of Helm value files to use when generating a template", + "items": { + "type": "string" + } + }, + "values": { + "type": "string", + "title": "Values specifies Helm values to be passed to helm template, typically defined as a block. ValuesObject takes precedence over Values, so use one or the other.\n+patchStrategy=replace" + }, + "valuesObject": { + "$ref": "#/definitions/runtimeRawExtension" + }, + "version": { + "type": "string", + "title": "Version is the Helm version to use for templating (\"3\")" + } + } + }, + "v1alpha1ApplicationSourceJsonnet": { + "type": "object", + "title": "ApplicationSourceJsonnet holds options specific to applications of type Jsonnet", + "properties": { + "extVars": { + "type": "array", + "title": "ExtVars is a list of Jsonnet External Variables", + "items": { + "$ref": "#/definitions/v1alpha1JsonnetVar" + } + }, + "libs": { + "type": "array", + "title": "Additional library search dirs", + "items": { + "type": "string" + } + }, + "tlas": { + "type": "array", + "title": "TLAS is a list of Jsonnet Top-level Arguments", + "items": { + "$ref": "#/definitions/v1alpha1JsonnetVar" + } + } + } + }, + "v1alpha1ApplicationSourceKustomize": { + "type": "object", + "title": "ApplicationSourceKustomize holds options specific to an Application source specific to Kustomize", + "properties": { + "apiVersions": { + "description": "APIVersions specifies the Kubernetes resource API versions to pass to Helm when templating 
manifests. By default,\nArgo CD uses the API versions of the target cluster. The format is [group/]version/kind.", + "type": "array", + "items": { + "type": "string" + } + }, + "commonAnnotations": { + "type": "object", + "title": "CommonAnnotations is a list of additional annotations to add to rendered manifests", + "additionalProperties": { + "type": "string" + } + }, + "commonAnnotationsEnvsubst": { + "type": "boolean", + "title": "CommonAnnotationsEnvsubst specifies whether to apply env variables substitution for annotation values" + }, + "commonLabels": { + "type": "object", + "title": "CommonLabels is a list of additional labels to add to rendered manifests", + "additionalProperties": { + "type": "string" + } + }, + "components": { + "type": "array", + "title": "Components specifies a list of kustomize components to add to the kustomization before building", + "items": { + "type": "string" + } + }, + "forceCommonAnnotations": { + "type": "boolean", + "title": "ForceCommonAnnotations specifies whether to force applying common annotations to resources for Kustomize apps" + }, + "forceCommonLabels": { + "type": "boolean", + "title": "ForceCommonLabels specifies whether to force applying common labels to resources for Kustomize apps" + }, + "images": { + "type": "array", + "title": "Images is a list of Kustomize image override specifications", + "items": { + "type": "string" + } + }, + "kubeVersion": { + "description": "KubeVersion specifies the Kubernetes API version to pass to Helm when templating manifests. 
By default, Argo CD\nuses the Kubernetes version of the target cluster.", + "type": "string" + }, + "labelWithoutSelector": { + "type": "boolean", + "title": "LabelWithoutSelector specifies whether to apply common labels to resource selectors or not" + }, + "namePrefix": { + "type": "string", + "title": "NamePrefix is a prefix appended to resources for Kustomize apps" + }, + "nameSuffix": { + "type": "string", + "title": "NameSuffix is a suffix appended to resources for Kustomize apps" + }, + "namespace": { + "type": "string", + "title": "Namespace sets the namespace that Kustomize adds to all resources" + }, + "patches": { + "type": "array", + "title": "Patches is a list of Kustomize patches", + "items": { + "$ref": "#/definitions/v1alpha1KustomizePatch" + } + }, + "replicas": { + "type": "array", + "title": "Replicas is a list of Kustomize Replicas override specifications", + "items": { + "$ref": "#/definitions/v1alpha1KustomizeReplica" + } + }, + "version": { + "type": "string", + "title": "Version controls which version of Kustomize to use for rendering manifests" + } + } + }, + "v1alpha1ApplicationSourcePlugin": { + "type": "object", + "title": "ApplicationSourcePlugin holds options specific to config management plugins", + "properties": { + "env": { + "type": "array", + "items": { + "$ref": "#/definitions/applicationv1alpha1EnvEntry" + } + }, + "name": { + "type": "string" + }, + "parameters": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSourcePluginParameter" + } + } + } + }, + "v1alpha1ApplicationSourcePluginParameter": { + "type": "object", + "properties": { + "array": { + "description": "Array is the value of an array type parameter.", + "type": "array", + "items": { + "type": "string" + } + }, + "map": { + "description": "Map is the value of a map type parameter.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "name": { + "description": "Name is the name identifying a parameter.", + "type": 
"string" + }, + "string": { + "description": "String_ is the value of a string type parameter.", + "type": "string" + } + } + }, + "v1alpha1ApplicationSpec": { + "description": "ApplicationSpec represents desired application state. Contains link to repository with application definition and additional parameters link definition revision.", + "type": "object", + "properties": { + "destination": { + "$ref": "#/definitions/v1alpha1ApplicationDestination" + }, + "ignoreDifferences": { + "type": "array", + "title": "IgnoreDifferences is a list of resources and their fields which should be ignored during comparison", + "items": { + "$ref": "#/definitions/v1alpha1ResourceIgnoreDifferences" + } + }, + "info": { + "type": "array", + "title": "Info contains a list of information (URLs, email addresses, and plain text) that relates to the application", + "items": { + "$ref": "#/definitions/v1alpha1Info" + } + }, + "project": { + "description": "Project is a reference to the project this application belongs to.\nThe empty string means that application belongs to the 'default' project.", + "type": "string" + }, + "revisionHistoryLimit": { + "description": "RevisionHistoryLimit limits the number of items kept in the application's revision history, which is used for informational purposes as well as for rollbacks to previous versions.\nThis should only be changed in exceptional circumstances.\nSetting to zero will store no history. 
This will reduce storage used.\nIncreasing will increase the space used to store the history, so we do not recommend increasing it.\nDefault is 10.", + "type": "integer", + "format": "int64" + }, + "source": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + }, + "sourceHydrator": { + "$ref": "#/definitions/v1alpha1SourceHydrator" + }, + "sources": { + "type": "array", + "title": "Sources is a reference to the location of the application's manifests or chart", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } + }, + "syncPolicy": { + "$ref": "#/definitions/v1alpha1SyncPolicy" + } + } + }, + "v1alpha1ApplicationStatus": { + "type": "object", + "title": "ApplicationStatus contains status information for the application", + "properties": { + "conditions": { + "type": "array", + "title": "Conditions is a list of currently observed application conditions", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationCondition" + } + }, + "controllerNamespace": { + "type": "string", + "title": "ControllerNamespace indicates the namespace in which the application controller is located" + }, + "health": { + "$ref": "#/definitions/v1alpha1HealthStatus" + }, + "history": { + "type": "array", + "title": "History contains information about the application's sync history", + "items": { + "$ref": "#/definitions/v1alpha1RevisionHistory" + } + }, + "observedAt": { + "$ref": "#/definitions/v1Time" + }, + "operationState": { + "$ref": "#/definitions/v1alpha1OperationState" + }, + "reconciledAt": { + "$ref": "#/definitions/v1Time" + }, + "resourceHealthSource": { + "type": "string", + "title": "ResourceHealthSource indicates where the resource health status is stored: inline if not set or appTree" + }, + "resources": { + "type": "array", + "title": "Resources is a list of Kubernetes resources managed by this application", + "items": { + "$ref": "#/definitions/applicationv1alpha1ResourceStatus" + } + }, + "sourceHydrator": { + "$ref": 
"#/definitions/v1alpha1SourceHydratorStatus" + }, + "sourceType": { + "type": "string", + "title": "SourceType specifies the type of this application" + }, + "sourceTypes": { + "type": "array", + "title": "SourceTypes specifies the type of the sources included in the application", + "items": { + "type": "string" + } + }, + "summary": { + "$ref": "#/definitions/v1alpha1ApplicationSummary" + }, + "sync": { + "$ref": "#/definitions/v1alpha1SyncStatus" + } + } + }, + "v1alpha1ApplicationSummary": { + "type": "object", + "title": "ApplicationSummary contains information about URLs and container images used by an application", + "properties": { + "externalURLs": { + "description": "ExternalURLs holds all external URLs of application child resources.", + "type": "array", + "items": { + "type": "string" + } + }, + "images": { + "description": "Images holds all images of application child resources.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1ApplicationTree": { + "type": "object", + "title": "ApplicationTree holds nodes which belongs to the application\nTODO: describe purpose of this type", + "properties": { + "hosts": { + "type": "array", + "title": "Hosts holds list of Kubernetes nodes that run application related pods", + "items": { + "$ref": "#/definitions/v1alpha1HostInfo" + } + }, + "nodes": { + "description": "Nodes contains list of nodes which either directly managed by the application and children of directly managed nodes.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ResourceNode" + } + }, + "orphanedNodes": { + "description": "OrphanedNodes contains if or orphaned nodes: nodes which are not managed by the app but in the same namespace. 
List is populated only if orphaned resources enabled in app project.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ResourceNode" + } + }, + "shardsCount": { + "type": "integer", + "format": "int64", + "title": "ShardsCount contains total number of shards the application tree is split into" + } + } + }, + "v1alpha1ApplicationWatchEvent": { + "description": "ApplicationWatchEvent contains information about application change.", + "type": "object", + "properties": { + "application": { + "$ref": "#/definitions/v1alpha1Application" + }, + "type": { + "type": "string" + } + } + }, + "v1alpha1Backoff": { + "type": "object", + "title": "Backoff is the backoff strategy to use on subsequent retries for failing syncs", + "properties": { + "duration": { + "type": "string", + "title": "Duration is the amount to back off. Default unit is seconds, but could also be a duration (e.g. \"2m\", \"1h\")" + }, + "factor": { + "type": "integer", + "format": "int64", + "title": "Factor is a factor to multiply the base duration after each failed retry" + }, + "maxDuration": { + "type": "string", + "title": "MaxDuration is the maximum amount of time allowed for the backoff strategy" + } + } + }, + "v1alpha1BasicAuthBitbucketServer": { + "description": "BasicAuthBitbucketServer defines the username/(password or personal access token) for Basic auth.", + "type": "object", + "properties": { + "passwordRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + }, + "username": { + "type": "string", + "title": "Username for Basic auth" + } + } + }, + "v1alpha1BearerTokenBitbucket": { + "description": "BearerTokenBitbucket defines the Bearer token for BitBucket AppToken auth.", + "type": "object", + "properties": { + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1BearerTokenBitbucketCloud": { + "description": "BearerTokenBitbucketCloud defines the Bearer token for BitBucket AppToken auth.", + "type": "object", + "properties": { + "tokenRef": { + 
"$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1ChartDetails": { + "type": "object", + "title": "ChartDetails contains helm chart metadata for a specific version", + "properties": { + "description": { + "type": "string" + }, + "home": { + "type": "string", + "title": "The URL of this projects home page, e.g. \"http://example.com\"" + }, + "maintainers": { + "type": "array", + "title": "List of maintainer details, name and email, e.g. [\"John Doe \"]", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1Cluster": { + "type": "object", + "title": "Cluster is the definition of a cluster resource", + "properties": { + "annotations": { + "type": "object", + "title": "Annotations for cluster secret metadata", + "additionalProperties": { + "type": "string" + } + }, + "clusterResources": { + "description": "Indicates if cluster level resources should be managed. This setting is used only if cluster is connected in a namespaced mode.", + "type": "boolean" + }, + "config": { + "$ref": "#/definitions/v1alpha1ClusterConfig" + }, + "connectionState": { + "$ref": "#/definitions/v1alpha1ConnectionState" + }, + "info": { + "$ref": "#/definitions/v1alpha1ClusterInfo" + }, + "labels": { + "type": "object", + "title": "Labels for cluster secret metadata", + "additionalProperties": { + "type": "string" + } + }, + "name": { + "type": "string", + "title": "Name of the cluster. If omitted, will use the server address" + }, + "namespaces": { + "description": "Holds list of namespaces which are accessible in that cluster. 
Cluster level resources will be ignored if namespace list is not empty.", + "type": "array", + "items": { + "type": "string" + } + }, + "project": { + "type": "string", + "title": "Reference between project and cluster that allow you automatically to be added as item inside Destinations project entity" + }, + "refreshRequestedAt": { + "$ref": "#/definitions/v1Time" + }, + "server": { + "type": "string", + "title": "Server is the API server URL of the Kubernetes cluster" + }, + "serverVersion": { + "type": "string", + "title": "Deprecated: use Info.ServerVersion field instead.\nThe server version" + }, + "shard": { + "description": "Shard contains optional shard number. Calculated on the fly by the application controller if not specified.", + "type": "integer", + "format": "int64" + } + } + }, + "v1alpha1ClusterCacheInfo": { + "type": "object", + "title": "ClusterCacheInfo contains information about the cluster cache", + "properties": { + "apisCount": { + "type": "integer", + "format": "int64", + "title": "APIsCount holds number of observed Kubernetes API count" + }, + "lastCacheSyncTime": { + "$ref": "#/definitions/v1Time" + }, + "resourcesCount": { + "type": "integer", + "format": "int64", + "title": "ResourcesCount holds number of observed Kubernetes resources" + } + } + }, + "v1alpha1ClusterConfig": { + "description": "ClusterConfig is the configuration attributes. This structure is subset of the go-client\nrest.Config with annotations added for marshalling.", + "type": "object", + "properties": { + "awsAuthConfig": { + "$ref": "#/definitions/v1alpha1AWSAuthConfig" + }, + "bearerToken": { + "description": "Server requires Bearer authentication. 
This client will not attempt to use\nrefresh tokens for an OAuth2 flow.\nTODO: demonstrate an OAuth2 compatible client.", + "type": "string" + }, + "disableCompression": { + "description": "DisableCompression bypasses automatic GZip compression requests to the server.", + "type": "boolean" + }, + "execProviderConfig": { + "$ref": "#/definitions/v1alpha1ExecProviderConfig" + }, + "password": { + "type": "string" + }, + "proxyUrl": { + "type": "string", + "title": "ProxyURL is the URL to the proxy to be used for all requests send to the server" + }, + "tlsClientConfig": { + "$ref": "#/definitions/v1alpha1TLSClientConfig" + }, + "username": { + "type": "string", + "title": "Server requires Basic authentication" + } + } + }, + "v1alpha1ClusterGenerator": { + "description": "ClusterGenerator defines a generator to match against clusters registered with ArgoCD.", + "type": "object", + "properties": { + "flatList": { + "type": "boolean", + "title": "returns the clusters a single 'clusters' value in the template" + }, + "selector": { + "$ref": "#/definitions/v1LabelSelector" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "type": "object", + "title": "Values contains key/value pairs which are passed directly as parameters to the template", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1alpha1ClusterInfo": { + "type": "object", + "title": "ClusterInfo contains information about the cluster", + "properties": { + "apiVersions": { + "type": "array", + "title": "APIVersions contains list of API versions supported by the cluster", + "items": { + "type": "string" + } + }, + "applicationsCount": { + "type": "integer", + "format": "int64", + "title": "ApplicationsCount is the number of applications managed by Argo CD on the cluster" + }, + "cacheInfo": { + "$ref": "#/definitions/v1alpha1ClusterCacheInfo" + }, + "connectionState": { + "$ref": "#/definitions/v1alpha1ConnectionState" + }, + "serverVersion": { + 
"type": "string", + "title": "ServerVersion contains information about the Kubernetes version of the cluster" + } + } + }, + "v1alpha1ClusterList": { + "description": "ClusterList is a collection of Clusters.", + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Cluster" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1Command": { + "type": "object", + "title": "Command holds binary path and arguments list", + "properties": { + "args": { + "type": "array", + "items": { + "type": "string" + } + }, + "command": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1ComparedTo": { + "type": "object", + "title": "ComparedTo contains application source and target which was used for resources comparison", + "properties": { + "destination": { + "$ref": "#/definitions/v1alpha1ApplicationDestination" + }, + "ignoreDifferences": { + "type": "array", + "title": "IgnoreDifferences is a reference to the application's ignored differences used for comparison", + "items": { + "$ref": "#/definitions/v1alpha1ResourceIgnoreDifferences" + } + }, + "source": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + }, + "sources": { + "type": "array", + "title": "Sources is a reference to the application's multiple sources used for comparison", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } + } + } + }, + "v1alpha1ConfigManagementPlugin": { + "type": "object", + "title": "ConfigManagementPlugin contains config management plugin configuration", + "properties": { + "generate": { + "$ref": "#/definitions/v1alpha1Command" + }, + "init": { + "$ref": "#/definitions/v1alpha1Command" + }, + "lockRepo": { + "type": "boolean" + }, + "name": { + "type": "string" + } + } + }, + "v1alpha1ConfigMapKeyRef": { + "description": "Utility struct for a reference to a configmap key.", + "type": "object", + "properties": { + "configMapName": { + "type": "string" 
+ }, + "key": { + "type": "string" + } + } + }, + "v1alpha1ConnectionState": { + "type": "object", + "title": "ConnectionState contains information about remote resource connection state, currently used for clusters and repositories", + "properties": { + "attemptedAt": { + "$ref": "#/definitions/v1Time" + }, + "message": { + "type": "string", + "title": "Message contains human readable information about the connection status" + }, + "status": { + "type": "string", + "title": "Status contains the current status indicator for the connection" + } + } + }, + "v1alpha1DrySource": { + "description": "DrySource specifies a location for dry \"don't repeat yourself\" manifest source information.", + "type": "object", + "properties": { + "path": { + "type": "string", + "title": "Path is a directory path within the Git repository where the manifests are located" + }, + "repoURL": { + "type": "string", + "title": "RepoURL is the URL to the git repository that contains the application manifests" + }, + "targetRevision": { + "type": "string", + "title": "TargetRevision defines the revision of the source to hydrate" + } + } + }, + "v1alpha1DuckTypeGenerator": { + "description": "DuckType defines a generator to match against clusters registered with ArgoCD.", + "type": "object", + "properties": { + "configMapRef": { + "type": "string", + "title": "ConfigMapRef is a ConfigMap with the duck type definitions needed to retrieve the data\n this includes apiVersion(group/version), kind, matchKey and validation settings\nName is the resource name of the kind, group and version, defined in the ConfigMapRef\nRequeueAfterSeconds is how long before the duckType will be rechecked for a change" + }, + "labelSelector": { + "$ref": "#/definitions/v1LabelSelector" + }, + "name": { + "type": "string" + }, + "requeueAfterSeconds": { + "type": "integer", + "format": "int64" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "type": "object", + "title": 
"Values contains key/value pairs which are passed directly as parameters to the template", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1alpha1ExecProviderConfig": { + "type": "object", + "title": "ExecProviderConfig is config used to call an external command to perform cluster authentication\nSee: https://godoc.org/k8s.io/client-go/tools/clientcmd/api#ExecConfig", + "properties": { + "apiVersion": { + "type": "string", + "title": "Preferred input version of the ExecInfo" + }, + "args": { + "type": "array", + "title": "Arguments to pass to the command when executing it", + "items": { + "type": "string" + } + }, + "command": { + "type": "string", + "title": "Command to execute" + }, + "env": { + "type": "object", + "title": "Env defines additional environment variables to expose to the process", + "additionalProperties": { + "type": "string" + } + }, + "installHint": { + "type": "string", + "title": "This text is shown to the user when the executable doesn't seem to be present" + } + } + }, + "v1alpha1GitDirectoryGeneratorItem": { + "type": "object", + "properties": { + "exclude": { + "type": "boolean" + }, + "path": { + "type": "string" + } + } + }, + "v1alpha1GitFileGeneratorItem": { + "type": "object", + "properties": { + "path": { + "type": "string" + } + } + }, + "v1alpha1GitGenerator": { + "type": "object", + "properties": { + "directories": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1GitDirectoryGeneratorItem" + } + }, + "files": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1GitFileGeneratorItem" + } + }, + "pathParamPrefix": { + "type": "string" + }, + "repoURL": { + "type": "string" + }, + "requeueAfterSeconds": { + "type": "integer", + "format": "int64" + }, + "revision": { + "type": "string" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "type": "object", + "title": "Values contains key/value pairs which are passed directly as parameters 
to the template", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1alpha1GnuPGPublicKey": { + "type": "object", + "title": "GnuPGPublicKey is a representation of a GnuPG public key", + "properties": { + "fingerprint": { + "type": "string", + "title": "Fingerprint is the fingerprint of the key" + }, + "keyData": { + "type": "string", + "title": "KeyData holds the raw key data, in base64 encoded format" + }, + "keyID": { + "type": "string", + "title": "KeyID specifies the key ID, in hexadecimal string format" + }, + "owner": { + "type": "string", + "title": "Owner holds the owner identification, e.g. a name and e-mail address" + }, + "subType": { + "type": "string", + "title": "SubType holds the key's sub type (e.g. rsa4096)" + }, + "trust": { + "type": "string", + "title": "Trust holds the level of trust assigned to this key" + } + } + }, + "v1alpha1GnuPGPublicKeyList": { + "type": "object", + "title": "GnuPGPublicKeyList is a collection of GnuPGPublicKey objects", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1GnuPGPublicKey" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1HealthStatus": { + "type": "object", + "title": "HealthStatus contains information about the currently observed health state of an application or resource", + "properties": { + "lastTransitionTime": { + "$ref": "#/definitions/v1Time" + }, + "message": { + "type": "string", + "title": "Message is a human-readable informational message describing the health status" + }, + "status": { + "type": "string", + "title": "Status holds the status code of the application or resource" + } + } + }, + "v1alpha1HelmFileParameter": { + "type": "object", + "title": "HelmFileParameter is a file parameter that's passed to helm template during manifest generation", + "properties": { + "name": { + "type": "string", + "title": "Name is the name of the Helm parameter" + }, + "path": { + "type": "string", + 
"title": "Path is the path to the file containing the values for the Helm parameter" + } + } + }, + "v1alpha1HelmParameter": { + "type": "object", + "title": "HelmParameter is a parameter that's passed to helm template during manifest generation", + "properties": { + "forceString": { + "type": "boolean", + "title": "ForceString determines whether to tell Helm to interpret booleans and numbers as strings" + }, + "name": { + "type": "string", + "title": "Name is the name of the Helm parameter" + }, + "value": { + "type": "string", + "title": "Value is the value for the Helm parameter" + } + } + }, + "v1alpha1HostInfo": { + "type": "object", + "title": "HostInfo holds host name and resources metrics\nTODO: describe purpose of this type\nTODO: describe members of this type", + "properties": { + "name": { + "type": "string" + }, + "resourcesInfo": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1HostResourceInfo" + } + }, + "systemInfo": { + "$ref": "#/definitions/v1NodeSystemInfo" + } + } + }, + "v1alpha1HostResourceInfo": { + "type": "object", + "title": "TODO: describe this type", + "properties": { + "capacity": { + "type": "integer", + "format": "int64" + }, + "requestedByApp": { + "type": "integer", + "format": "int64" + }, + "requestedByNeighbors": { + "type": "integer", + "format": "int64" + }, + "resourceName": { + "type": "string" + } + } + }, + "v1alpha1HydrateOperation": { + "type": "object", + "title": "HydrateOperation contains information about the most recent hydrate operation", + "properties": { + "drySHA": { + "type": "string", + "title": "DrySHA holds the resolved revision (sha) of the dry source as of the most recent reconciliation" + }, + "finishedAt": { + "$ref": "#/definitions/v1Time" + }, + "hydratedSHA": { + "type": "string", + "title": "HydratedSHA holds the resolved revision (sha) of the hydrated source as of the most recent reconciliation" + }, + "message": { + "type": "string", + "title": "Message contains a message 
describing the current status of the hydrate operation" + }, + "phase": { + "type": "string", + "title": "Phase indicates the status of the hydrate operation" + }, + "sourceHydrator": { + "$ref": "#/definitions/v1alpha1SourceHydrator" + }, + "startedAt": { + "$ref": "#/definitions/v1Time" + } + } + }, + "v1alpha1HydrateTo": { + "description": "HydrateTo specifies a location to which hydrated manifests should be pushed as a \"staging area\" before being moved to\nthe SyncSource. The RepoURL and Path are assumed based on the associated SyncSource config in the SourceHydrator.", + "type": "object", + "properties": { + "targetBranch": { + "type": "string", + "title": "TargetBranch is the branch to which hydrated manifests should be committed" + } + } + }, + "v1alpha1Info": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "v1alpha1InfoItem": { + "type": "object", + "title": "InfoItem contains arbitrary, human readable information about an application", + "properties": { + "name": { + "description": "Name is a human readable title for this piece of information.", + "type": "string" + }, + "value": { + "description": "Value is human readable content.", + "type": "string" + } + } + }, + "v1alpha1JWTToken": { + "type": "object", + "title": "JWTToken holds the issuedAt and expiresAt values of a token", + "properties": { + "exp": { + "type": "integer", + "format": "int64" + }, + "iat": { + "type": "integer", + "format": "int64" + }, + "id": { + "type": "string" + } + } + }, + "v1alpha1JWTTokens": { + "type": "object", + "title": "JWTTokens represents a list of JWT tokens", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1JWTToken" + } + } + } + }, + "v1alpha1JsonnetVar": { + "type": "object", + "title": "JsonnetVar represents a variable to be passed to jsonnet during manifest generation", + "properties": { + "code": { + "type": "boolean" + }, + "name": { + 
"type": "string" + }, + "value": { + "type": "string" + } + } + }, + "v1alpha1KnownTypeField": { + "type": "object", + "title": "KnownTypeField contains mapping between CRD field and known Kubernetes type.\nThis is mainly used for unit conversion in unknown resources (e.g. 0.1 == 100mi)\nTODO: Describe the members of this type", + "properties": { + "field": { + "type": "string" + }, + "type": { + "type": "string" + } + } + }, + "v1alpha1KustomizeGvk": { + "type": "object", + "properties": { + "group": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "version": { + "type": "string" + } + } + }, + "v1alpha1KustomizeOptions": { + "type": "object", + "title": "KustomizeOptions are options for kustomize to use when building manifests", + "properties": { + "binaryPath": { + "type": "string", + "title": "BinaryPath holds optional path to kustomize binary" + }, + "buildOptions": { + "type": "string", + "title": "BuildOptions is a string of build parameters to use when calling `kustomize build`" + } + } + }, + "v1alpha1KustomizePatch": { + "type": "object", + "properties": { + "options": { + "type": "object", + "additionalProperties": { + "type": "boolean" + } + }, + "patch": { + "type": "string" + }, + "path": { + "type": "string" + }, + "target": { + "$ref": "#/definitions/v1alpha1KustomizeSelector" + } + } + }, + "v1alpha1KustomizeReplica": { + "type": "object", + "properties": { + "count": { + "$ref": "#/definitions/intstrIntOrString" + }, + "name": { + "type": "string", + "title": "Name of Deployment or StatefulSet" + } + } + }, + "v1alpha1KustomizeResId": { + "type": "object", + "properties": { + "gvk": { + "$ref": "#/definitions/v1alpha1KustomizeGvk" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + } + }, + "v1alpha1KustomizeSelector": { + "type": "object", + "properties": { + "annotationSelector": { + "type": "string" + }, + "labelSelector": { + "type": "string" + }, + "resId": { + "$ref": 
"#/definitions/v1alpha1KustomizeResId" + } + } + }, + "v1alpha1ListGenerator": { + "type": "object", + "title": "ListGenerator include items info", + "properties": { + "elements": { + "type": "array", + "title": "+kubebuilder:validation:Optional", + "items": { + "$ref": "#/definitions/v1JSON" + } + }, + "elementsYaml": { + "type": "string" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + } + } + }, + "v1alpha1ManagedNamespaceMetadata": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1alpha1MatrixGenerator": { + "description": "MatrixGenerator generates the cartesian product of two sets of parameters. The parameters are defined by two nested\ngenerators.", + "type": "object", + "properties": { + "generators": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetNestedGenerator" + } + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + } + } + }, + "v1alpha1MergeGenerator": { + "description": "MergeGenerator merges the output of two or more generators. Where the values for all specified merge keys are equal\nbetween two sets of generated parameters, the parameter sets will be merged with the parameters from the latter\ngenerator taking precedence. Parameter sets with merge keys not present in the base generator's params will be\nignored.\nFor example, if the first generator produced [{a: '1', b: '2'}, {c: '1', d: '1'}] and the second generator produced\n[{'a': 'override'}], the united parameters for merge keys = ['a'] would be\n[{a: 'override', b: '1'}, {c: '1', d: '1'}].\n\nMergeGenerator supports template overriding. 
If a MergeGenerator is one of multiple top-level generators, its\ntemplate will be merged with the top-level generator before the parameters are applied.", + "type": "object", + "properties": { + "generators": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSetNestedGenerator" + } + }, + "mergeKeys": { + "type": "array", + "items": { + "type": "string" + } + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + } + } + }, + "v1alpha1Operation": { + "type": "object", + "title": "Operation contains information about a requested or running operation", + "properties": { + "info": { + "type": "array", + "title": "Info is a list of informational items for this operation", + "items": { + "$ref": "#/definitions/v1alpha1Info" + } + }, + "initiatedBy": { + "$ref": "#/definitions/v1alpha1OperationInitiator" + }, + "retry": { + "$ref": "#/definitions/v1alpha1RetryStrategy" + }, + "sync": { + "$ref": "#/definitions/v1alpha1SyncOperation" + } + } + }, + "v1alpha1OperationInitiator": { + "type": "object", + "title": "OperationInitiator contains information about the initiator of an operation", + "properties": { + "automated": { + "description": "Automated is set to true if operation was initiated automatically by the application controller.", + "type": "boolean" + }, + "username": { + "type": "string", + "title": "Username contains the name of a user who started operation" + } + } + }, + "v1alpha1OperationState": { + "type": "object", + "title": "OperationState contains information about state of a running operation", + "properties": { + "finishedAt": { + "$ref": "#/definitions/v1Time" + }, + "message": { + "description": "Message holds any pertinent messages when attempting to perform operation (typically errors).", + "type": "string" + }, + "operation": { + "$ref": "#/definitions/v1alpha1Operation" + }, + "phase": { + "type": "string", + "title": "Phase is the current phase of the operation" + }, + "retryCount": { + "type": 
"integer", + "format": "int64", + "title": "RetryCount contains time of operation retries" + }, + "startedAt": { + "$ref": "#/definitions/v1Time" + }, + "syncResult": { + "$ref": "#/definitions/v1alpha1SyncOperationResult" + } + } + }, + "v1alpha1OrphanedResourceKey": { + "type": "object", + "title": "OrphanedResourceKey is a reference to a resource to be ignored from", + "properties": { + "group": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "name": { + "type": "string" + } + } + }, + "v1alpha1OrphanedResourcesMonitorSettings": { + "type": "object", + "title": "OrphanedResourcesMonitorSettings holds settings of orphaned resources monitoring", + "properties": { + "ignore": { + "type": "array", + "title": "Ignore contains a list of resources that are to be excluded from orphaned resources monitoring", + "items": { + "$ref": "#/definitions/v1alpha1OrphanedResourceKey" + } + }, + "warn": { + "type": "boolean", + "title": "Warn indicates if warning condition should be created for apps which have orphaned resources" + } + } + }, + "v1alpha1OverrideIgnoreDiff": { + "type": "object", + "title": "OverrideIgnoreDiff contains configurations about how fields should be ignored during diffs between\nthe desired state and live state", + "properties": { + "jSONPointers": { + "type": "array", + "title": "JSONPointers is a JSON path list following the format defined in RFC4627 (https://datatracker.ietf.org/doc/html/rfc6902#section-3)", + "items": { + "type": "string" + } + }, + "jqPathExpressions": { + "type": "array", + "title": "JQPathExpressions is a JQ path list that will be evaludated during the diff process", + "items": { + "type": "string" + } + }, + "managedFieldsManagers": { + "type": "array", + "title": "ManagedFieldsManagers is a list of trusted managers. 
Fields mutated by those managers will take precedence over the\ndesired state defined in the SCM and won't be displayed in diffs", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1PluginConfigMapRef": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of the ConfigMap" + } + } + }, + "v1alpha1PluginGenerator": { + "description": "PluginGenerator defines connection info specific to Plugin.", + "type": "object", + "properties": { + "configMapRef": { + "$ref": "#/definitions/v1alpha1PluginConfigMapRef" + }, + "input": { + "$ref": "#/definitions/v1alpha1PluginInput" + }, + "requeueAfterSeconds": { + "description": "RequeueAfterSeconds determines how long the ApplicationSet controller will wait before reconciling the ApplicationSet again.", + "type": "integer", + "format": "int64" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "description": "Values contains key/value pairs which are passed directly as parameters to the template. These values will not be\nsent as parameters to the plugin.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1alpha1PluginInput": { + "type": "object", + "properties": { + "parameters": { + "description": "Parameters contains the information to pass to the plugin. It is a map. 
The keys must be strings, and the\nvalues can be any type.", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/v1JSON" + } + } + } + }, + "v1alpha1ProjectRole": { + "type": "object", + "title": "ProjectRole represents a role that has access to a project", + "properties": { + "description": { + "type": "string", + "title": "Description is a description of the role" + }, + "groups": { + "type": "array", + "title": "Groups are a list of OIDC group claims bound to this role", + "items": { + "type": "string" + } + }, + "jwtTokens": { + "type": "array", + "title": "JWTTokens are a list of generated JWT tokens bound to this role", + "items": { + "$ref": "#/definitions/v1alpha1JWTToken" + } + }, + "name": { + "type": "string", + "title": "Name is a name for this role" + }, + "policies": { + "type": "array", + "title": "Policies Stores a list of casbin formatted strings that define access policies for the role in the project", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1PullRequestGenerator": { + "description": "PullRequestGenerator defines a generator that scrapes a PullRequest API to find candidate pull requests.", + "type": "object", + "properties": { + "azuredevops": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorAzureDevOps" + }, + "bitbucket": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorBitbucket" + }, + "bitbucketServer": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorBitbucketServer" + }, + "filters": { + "description": "Filters for which pull requests should be considered.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorFilter" + } + }, + "gitea": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorGitea" + }, + "github": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorGithub" + }, + "gitlab": { + "$ref": "#/definitions/v1alpha1PullRequestGeneratorGitLab" + }, + "requeueAfterSeconds": { + "description": "Standard parameters.", + "type": "integer", + 
"format": "int64" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + } + } + }, + "v1alpha1PullRequestGeneratorAzureDevOps": { + "description": "PullRequestGeneratorAzureDevOps defines connection info specific to AzureDevOps.", + "type": "object", + "properties": { + "api": { + "description": "The Azure DevOps API URL to talk to. If blank, use https://dev.azure.com/.", + "type": "string" + }, + "labels": { + "type": "array", + "title": "Labels is used to filter the PRs that you want to target", + "items": { + "type": "string" + } + }, + "organization": { + "description": "Azure DevOps org to scan. Required.", + "type": "string" + }, + "project": { + "description": "Azure DevOps project name to scan. Required.", + "type": "string" + }, + "repo": { + "description": "Azure DevOps repo name to scan. Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1PullRequestGeneratorBitbucket": { + "description": "PullRequestGeneratorBitbucket defines connection info specific to Bitbucket.", + "type": "object", + "properties": { + "api": { + "description": "The Bitbucket REST API URL to talk to. If blank, uses https://api.bitbucket.org/2.0.", + "type": "string" + }, + "basicAuth": { + "$ref": "#/definitions/v1alpha1BasicAuthBitbucketServer" + }, + "bearerToken": { + "$ref": "#/definitions/v1alpha1BearerTokenBitbucketCloud" + }, + "owner": { + "description": "Workspace to scan. Required.", + "type": "string" + }, + "repo": { + "description": "Repo name to scan. Required.", + "type": "string" + } + } + }, + "v1alpha1PullRequestGeneratorBitbucketServer": { + "description": "PullRequestGeneratorBitbucketServer defines connection info specific to BitbucketServer.", + "type": "object", + "properties": { + "api": { + "description": "The Bitbucket REST API URL to talk to e.g. 
https://bitbucket.org/rest Required.", + "type": "string" + }, + "basicAuth": { + "$ref": "#/definitions/v1alpha1BasicAuthBitbucketServer" + }, + "bearerToken": { + "$ref": "#/definitions/v1alpha1BearerTokenBitbucket" + }, + "caRef": { + "$ref": "#/definitions/v1alpha1ConfigMapKeyRef" + }, + "insecure": { + "type": "boolean", + "title": "Allow self-signed TLS / Certificates; default: false" + }, + "project": { + "description": "Project to scan. Required.", + "type": "string" + }, + "repo": { + "description": "Repo name to scan. Required.", + "type": "string" + } + } + }, + "v1alpha1PullRequestGeneratorFilter": { + "description": "PullRequestGeneratorFilter is a single pull request filter.\nIf multiple filter types are set on a single struct, they will be AND'd together. All filters must\npass for a pull request to be included.", + "type": "object", + "properties": { + "branchMatch": { + "type": "string" + }, + "targetBranchMatch": { + "type": "string" + } + } + }, + "v1alpha1PullRequestGeneratorGitLab": { + "description": "PullRequestGeneratorGitLab defines connection info specific to GitLab.", + "type": "object", + "properties": { + "api": { + "description": "The GitLab API URL to talk to. If blank, uses https://gitlab.com/.", + "type": "string" + }, + "caRef": { + "$ref": "#/definitions/v1alpha1ConfigMapKeyRef" + }, + "insecure": { + "type": "boolean", + "title": "Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false" + }, + "labels": { + "type": "array", + "title": "Labels is used to filter the MRs that you want to target", + "items": { + "type": "string" + } + }, + "project": { + "description": "GitLab project to scan. Required.", + "type": "string" + }, + "pullRequestState": { + "type": "string", + "title": "PullRequestState is an additional MRs filter to get only those with a certain state. 
Default: \"\" (all states)" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1PullRequestGeneratorGitea": { + "description": "PullRequestGeneratorGitea defines connection info specific to Gitea.", + "type": "object", + "properties": { + "api": { + "type": "string", + "title": "The Gitea API URL to talk to. Required" + }, + "insecure": { + "description": "Allow insecure tls, for self-signed certificates; default: false.", + "type": "boolean" + }, + "owner": { + "description": "Gitea org or user to scan. Required.", + "type": "string" + }, + "repo": { + "description": "Gitea repo name to scan. Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1PullRequestGeneratorGithub": { + "description": "PullRequestGenerator defines connection info specific to GitHub.", + "type": "object", + "properties": { + "api": { + "description": "The GitHub API URL to talk to. If blank, use https://api.github.com/.", + "type": "string" + }, + "appSecretName": { + "description": "AppSecretName is a reference to a GitHub App repo-creds secret with permission to access pull requests.", + "type": "string" + }, + "labels": { + "type": "array", + "title": "Labels is used to filter the PRs that you want to target", + "items": { + "type": "string" + } + }, + "owner": { + "description": "GitHub org or user to scan. Required.", + "type": "string" + }, + "repo": { + "description": "GitHub repo name to scan. 
Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1RepoCreds": { + "type": "object", + "title": "RepoCreds holds the definition for repository credentials", + "properties": { + "enableOCI": { + "type": "boolean", + "title": "EnableOCI specifies whether helm-oci support should be enabled for this repo" + }, + "forceHttpBasicAuth": { + "type": "boolean", + "title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections" + }, + "gcpServiceAccountKey": { + "type": "string", + "title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos" + }, + "githubAppEnterpriseBaseUrl": { + "type": "string", + "title": "GithubAppEnterpriseBaseURL specifies the GitHub API URL for GitHub app authentication. If empty will default to https://api.github.com" + }, + "githubAppID": { + "type": "integer", + "format": "int64", + "title": "GithubAppId specifies the Github App ID of the app used to access the repo for GitHub app authentication" + }, + "githubAppInstallationID": { + "type": "integer", + "format": "int64", + "title": "GithubAppInstallationId specifies the ID of the installed GitHub App for GitHub app authentication" + }, + "githubAppPrivateKey": { + "type": "string", + "title": "GithubAppPrivateKey specifies the private key PEM data for authentication via GitHub app" + }, + "noProxy": { + "type": "string", + "title": "NoProxy specifies a list of targets where the proxy isn't used, applies only in cases where the proxy is applied" + }, + "password": { + "type": "string", + "title": "Password for authenticating at the repo server" + }, + "proxy": { + "type": "string", + "title": "Proxy specifies the HTTP/HTTPS proxy used to access repos at the repo server" + }, + "sshPrivateKey": { + "type": "string", + "title": "SSHPrivateKey contains the private key data for authenticating at the 
repo server using SSH (only Git repos)" + }, + "tlsClientCertData": { + "type": "string", + "title": "TLSClientCertData specifies the TLS client cert data for authenticating at the repo server" + }, + "tlsClientCertKey": { + "type": "string", + "title": "TLSClientCertKey specifies the TLS client cert key for authenticating at the repo server" + }, + "type": { + "description": "Type specifies the type of the repoCreds. Can be either \"git\" or \"helm\". \"git\" is assumed if empty or absent.", + "type": "string" + }, + "url": { + "type": "string", + "title": "URL is the URL to which these credentials match" + }, + "username": { + "type": "string", + "title": "Username for authenticating at the repo server" + } + } + }, + "v1alpha1RepoCredsList": { + "description": "RepositoryList is a collection of Repositories.", + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1RepoCreds" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1Repository": { + "type": "object", + "title": "Repository is a repository holding application configurations", + "properties": { + "connectionState": { + "$ref": "#/definitions/v1alpha1ConnectionState" + }, + "enableLfs": { + "description": "EnableLFS specifies whether git-lfs support should be enabled for this repo. 
Only valid for Git repositories.", + "type": "boolean" + }, + "enableOCI": { + "type": "boolean", + "title": "EnableOCI specifies whether helm-oci support should be enabled for this repo" + }, + "forceHttpBasicAuth": { + "type": "boolean", + "title": "ForceHttpBasicAuth specifies whether Argo CD should attempt to force basic auth for HTTP connections" + }, + "gcpServiceAccountKey": { + "type": "string", + "title": "GCPServiceAccountKey specifies the service account key in JSON format to be used for getting credentials to Google Cloud Source repos" + }, + "githubAppEnterpriseBaseUrl": { + "type": "string", + "title": "GithubAppEnterpriseBaseURL specifies the base URL of GitHub Enterprise installation. If empty will default to https://api.github.com" + }, + "githubAppID": { + "type": "integer", + "format": "int64", + "title": "GithubAppId specifies the ID of the GitHub app used to access the repo" + }, + "githubAppInstallationID": { + "type": "integer", + "format": "int64", + "title": "GithubAppInstallationId specifies the installation ID of the GitHub App used to access the repo" + }, + "githubAppPrivateKey": { + "type": "string", + "title": "Github App Private Key PEM data" + }, + "inheritedCreds": { + "type": "boolean", + "title": "Whether credentials were inherited from a credential set" + }, + "insecure": { + "type": "boolean", + "title": "Insecure specifies whether the connection to the repository ignores any errors when verifying TLS certificates or SSH host keys" + }, + "insecureIgnoreHostKey": { + "type": "boolean", + "title": "InsecureIgnoreHostKey should not be used anymore, Insecure is favoured\nUsed only for Git repos" + }, + "name": { + "type": "string", + "title": "Name specifies a name to be used for this repo. 
Only used with Helm repos" + }, + "noProxy": { + "type": "string", + "title": "NoProxy specifies a list of targets where the proxy isn't used, applies only in cases where the proxy is applied" + }, + "password": { + "type": "string", + "title": "Password contains the password or PAT used for authenticating at the remote repository" + }, + "project": { + "type": "string", + "title": "Reference between project and repository that allows it to be automatically added as an item inside SourceRepos project entity" + }, + "proxy": { + "type": "string", + "title": "Proxy specifies the HTTP/HTTPS proxy used to access the repo" + }, + "repo": { + "type": "string", + "title": "Repo contains the URL to the remote repository" + }, + "sshPrivateKey": { + "description": "SSHPrivateKey contains the PEM data for authenticating at the repo server. Only used with Git repos.", + "type": "string" + }, + "tlsClientCertData": { + "type": "string", + "title": "TLSClientCertData contains a certificate in PEM format for authenticating at the repo server" + }, + "tlsClientCertKey": { + "type": "string", + "title": "TLSClientCertKey contains a private key in PEM format for authenticating at the repo server" + }, + "type": { + "description": "Type specifies the type of the repo. Can be either \"git\" or \"helm\". \"git\" is assumed if empty or absent.", + "type": "string" + }, + "username": { + "type": "string", + "title": "Username contains the user name used for authenticating at the remote repository" + } + } + }, + "v1alpha1RepositoryCertificate": { + "type": "object", + "title": "A RepositoryCertificate is either SSH known hosts entry or TLS certificate", + "properties": { + "certData": { + "type": "string", + "format": "byte", + "title": "CertData contains the actual certificate data, dependent on the certificate type" + }, + "certInfo": { + "type": "string", + "title": "CertInfo will hold additional certificate info, dependent on the certificate type (e.g. 
SSH fingerprint, X509 CommonName)" + }, + "certSubType": { + "type": "string", + "title": "CertSubType specifies the sub type of the cert, i.e. \"ssh-rsa\"" + }, + "certType": { + "type": "string", + "title": "CertType specifies the type of the certificate - currently one of \"https\" or \"ssh\"" + }, + "serverName": { + "type": "string", + "title": "ServerName specifies the DNS name of the server this certificate is intended for" + } + } + }, + "v1alpha1RepositoryCertificateList": { + "type": "object", + "title": "RepositoryCertificateList is a collection of RepositoryCertificates", + "properties": { + "items": { + "type": "array", + "title": "List of certificates to be processed", + "items": { + "$ref": "#/definitions/v1alpha1RepositoryCertificate" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1RepositoryList": { + "description": "RepositoryList is a collection of Repositories.", + "type": "object", + "properties": { + "items": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1Repository" + } + }, + "metadata": { + "$ref": "#/definitions/v1ListMeta" + } + } + }, + "v1alpha1ResourceAction": { + "type": "object", + "title": "TODO: describe this type\nTODO: describe members of this type", + "properties": { + "disabled": { + "type": "boolean" + }, + "displayName": { + "type": "string" + }, + "iconClass": { + "type": "string" + }, + "name": { + "type": "string" + }, + "params": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ResourceActionParam" + } + } + } + }, + "v1alpha1ResourceActionParam": { + "type": "object", + "title": "TODO: describe this type\nTODO: describe members of this type", + "properties": { + "default": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "v1alpha1ResourceDiff": { + "type": "object", + "title": "ResourceDiff holds the diff of a live and target resource object\nTODO: 
describe members of this type", + "properties": { + "diff": { + "type": "string", + "title": "Diff contains the JSON patch between target and live resource\nDeprecated: use NormalizedLiveState and PredictedLiveState to render the difference" + }, + "group": { + "type": "string" + }, + "hook": { + "type": "boolean" + }, + "kind": { + "type": "string" + }, + "liveState": { + "type": "string", + "title": "LiveState contains the JSON live resource manifest" + }, + "modified": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "normalizedLiveState": { + "type": "string", + "title": "NormalizedLiveState contains JSON serialized live resource state with applied normalizations" + }, + "predictedLiveState": { + "type": "string", + "title": "PredictedLiveState contains JSON serialized resource state that is calculated based on normalized and target resource state" + }, + "resourceVersion": { + "type": "string" + }, + "targetState": { + "type": "string", + "title": "TargetState contains the JSON serialized resource manifest defined in the Git/Helm" + } + } + }, + "v1alpha1ResourceIgnoreDifferences": { + "description": "ResourceIgnoreDifferences contains resource filter and list of json paths which should be ignored during comparison with live state.", + "type": "object", + "properties": { + "group": { + "type": "string" + }, + "jqPathExpressions": { + "type": "array", + "items": { + "type": "string" + } + }, + "jsonPointers": { + "type": "array", + "items": { + "type": "string" + } + }, + "kind": { + "type": "string" + }, + "managedFieldsManagers": { + "type": "array", + "title": "ManagedFieldsManagers is a list of trusted managers. 
Fields mutated by those managers will take precedence over the\ndesired state defined in the SCM and won't be displayed in diffs", + "items": { + "type": "string" + } + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + } + }, + "v1alpha1ResourceNetworkingInfo": { + "type": "object", + "title": "ResourceNetworkingInfo holds networking resource related information\nTODO: describe members of this type", + "properties": { + "externalURLs": { + "description": "ExternalURLs holds list of URLs which should be available externally. List is populated for ingress resources using rules hostnames.", + "type": "array", + "items": { + "type": "string" + } + }, + "ingress": { + "type": "array", + "items": { + "$ref": "#/definitions/v1LoadBalancerIngress" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "targetLabels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "targetRefs": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ResourceRef" + } + } + } + }, + "v1alpha1ResourceNode": { + "type": "object", + "title": "ResourceNode contains information about live resource and its children\nTODO: describe members of this type", + "properties": { + "createdAt": { + "$ref": "#/definitions/v1Time" + }, + "health": { + "$ref": "#/definitions/v1alpha1HealthStatus" + }, + "images": { + "type": "array", + "items": { + "type": "string" + } + }, + "info": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1InfoItem" + } + }, + "networkingInfo": { + "$ref": "#/definitions/v1alpha1ResourceNetworkingInfo" + }, + "parentRefs": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1ResourceRef" + } + }, + "resourceVersion": { + "type": "string" + } + }, + "allOf": [ + { + "$ref": "#/definitions/v1alpha1ResourceRef" + } + ] + }, + "v1alpha1ResourceOverride": { + "type": "object", + "title": "ResourceOverride holds configuration to customize 
resource diffing and health assessment\nTODO: describe the members of this type", + "properties": { + "actions": { + "type": "string" + }, + "healthLua": { + "type": "string" + }, + "ignoreDifferences": { + "$ref": "#/definitions/v1alpha1OverrideIgnoreDiff" + }, + "ignoreResourceUpdates": { + "$ref": "#/definitions/v1alpha1OverrideIgnoreDiff" + }, + "knownTypeFields": { + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1KnownTypeField" + } + }, + "useOpenLibs": { + "type": "boolean" + } + } + }, + "v1alpha1ResourceRef": { + "type": "object", + "title": "ResourceRef includes fields which uniquely identify a resource", + "properties": { + "group": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + "uid": { + "type": "string" + }, + "version": { + "type": "string" + } + } + }, + "v1alpha1ResourceResult": { + "type": "object", + "title": "ResourceResult holds the operation result details of a specific resource", + "properties": { + "group": { + "type": "string", + "title": "Group specifies the API group of the resource" + }, + "hookPhase": { + "description": "HookPhase contains the state of any operation associated with this resource OR hook\nThis can also contain values for non-hook resources.", + "type": "string" + }, + "hookType": { + "type": "string", + "title": "HookType specifies the type of the hook. Empty for non-hook resources" + }, + "kind": { + "type": "string", + "title": "Kind specifies the API kind of the resource" + }, + "message": { + "type": "string", + "title": "Message contains an informational or error message for the last sync OR operation" + }, + "name": { + "type": "string", + "title": "Name specifies the name of the resource" + }, + "namespace": { + "type": "string", + "title": "Namespace specifies the target namespace of the resource" + }, + "status": { + "type": "string", + "title": "Status holds the final result of the sync. 
Will be empty if the resources is yet to be applied/pruned and is always zero-value for hooks" + }, + "syncPhase": { + "type": "string", + "title": "SyncPhase indicates the particular phase of the sync that this result was acquired in" + }, + "version": { + "type": "string", + "title": "Version specifies the API version of the resource" + } + } + }, + "v1alpha1RetryStrategy": { + "type": "object", + "title": "RetryStrategy contains information about the strategy to apply when a sync failed", + "properties": { + "backoff": { + "$ref": "#/definitions/v1alpha1Backoff" + }, + "limit": { + "description": "Limit is the maximum number of attempts for retrying a failed sync. If set to 0, no retries will be performed.", + "type": "integer", + "format": "int64" + } + } + }, + "v1alpha1RevisionHistory": { + "type": "object", + "title": "RevisionHistory contains history information about a previous sync", + "properties": { + "deployStartedAt": { + "$ref": "#/definitions/v1Time" + }, + "deployedAt": { + "$ref": "#/definitions/v1Time" + }, + "id": { + "type": "integer", + "format": "int64", + "title": "ID is an auto incrementing identifier of the RevisionHistory" + }, + "initiatedBy": { + "$ref": "#/definitions/v1alpha1OperationInitiator" + }, + "revision": { + "type": "string", + "title": "Revision holds the revision the sync was performed against" + }, + "revisions": { + "type": "array", + "title": "Revisions holds the revision of each source in sources field the sync was performed against", + "items": { + "type": "string" + } + }, + "source": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + }, + "sources": { + "type": "array", + "title": "Sources is a reference to the application sources used for the sync operation", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } + } + } + }, + "v1alpha1RevisionMetadata": { + "type": "object", + "title": "RevisionMetadata contains metadata for a specific revision in a Git repository", + "properties": { + "author": 
{ + "type": "string", + "title": "who authored this revision,\ntypically their name and email, e.g. \"John Doe \",\nbut might not match this example" + }, + "date": { + "$ref": "#/definitions/v1Time" + }, + "message": { + "description": "Message contains the message associated with the revision, most likely the commit message.", + "type": "string" + }, + "signatureInfo": { + "description": "SignatureInfo contains a hint on the signer if the revision was signed with GPG, and signature verification is enabled.", + "type": "string" + }, + "tags": { + "type": "array", + "title": "Tags specifies any tags currently attached to the revision\nFloating tags can move from one revision to another", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1SCMProviderGenerator": { + "description": "SCMProviderGenerator defines a generator that scrapes a SCMaaS API to find candidate repos.", + "type": "object", + "properties": { + "awsCodeCommit": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorAWSCodeCommit" + }, + "azureDevOps": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorAzureDevOps" + }, + "bitbucket": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorBitbucket" + }, + "bitbucketServer": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorBitbucketServer" + }, + "cloneProtocol": { + "description": "Which protocol to use for the SCM URL. Default is provider-specific but ssh if possible. 
Not all providers\nnecessarily support all protocols.", + "type": "string" + }, + "filters": { + "description": "Filters for which repos should be considered.", + "type": "array", + "items": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorFilter" + } + }, + "gitea": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorGitea" + }, + "github": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorGithub" + }, + "gitlab": { + "$ref": "#/definitions/v1alpha1SCMProviderGeneratorGitlab" + }, + "requeueAfterSeconds": { + "description": "Standard parameters.", + "type": "integer", + "format": "int64" + }, + "template": { + "$ref": "#/definitions/v1alpha1ApplicationSetTemplate" + }, + "values": { + "type": "object", + "title": "Values contains key/value pairs which are passed directly as parameters to the template", + "additionalProperties": { + "type": "string" + } + } + } + }, + "v1alpha1SCMProviderGeneratorAWSCodeCommit": { + "description": "SCMProviderGeneratorAWSCodeCommit defines connection info specific to AWS CodeCommit.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "region": { + "description": "Region provides the AWS region to discover repos.\nif not provided, AppSet controller will infer the current region from environment.", + "type": "string" + }, + "role": { + "description": "Role provides the AWS IAM role to assume, for cross-account repo discovery\nif not provided, AppSet controller will use its pod/node identity to discover.", + "type": "string" + }, + "tagFilters": { + "type": "array", + "title": "TagFilters provides the tag filter(s) for repo discovery", + "items": { + "$ref": "#/definitions/v1alpha1TagFilter" + } + } + } + }, + "v1alpha1SCMProviderGeneratorAzureDevOps": { + "description": "SCMProviderGeneratorAzureDevOps defines connection info specific to Azure DevOps.", + "type": "object", + "properties": { + "accessTokenRef": { + 
"$ref": "#/definitions/v1alpha1SecretRef" + }, + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The URL to Azure DevOps. If blank, use https://dev.azure.com.", + "type": "string" + }, + "organization": { + "description": "Azure Devops organization. Required. E.g. \"my-organization\".", + "type": "string" + }, + "teamProject": { + "description": "Azure Devops team project. Required. E.g. \"my-team\".", + "type": "string" + } + } + }, + "v1alpha1SCMProviderGeneratorBitbucket": { + "description": "SCMProviderGeneratorBitbucket defines connection info specific to Bitbucket Cloud (API version 2).", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the main branch.", + "type": "boolean" + }, + "appPasswordRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + }, + "owner": { + "description": "Bitbucket workspace to scan. Required.", + "type": "string" + }, + "user": { + "type": "string", + "title": "Bitbucket user to use when authenticating. Should have a \"member\" role to be able to read all repositories and branches. Required" + } + } + }, + "v1alpha1SCMProviderGeneratorBitbucketServer": { + "description": "SCMProviderGeneratorBitbucketServer defines connection info specific to Bitbucket Server.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The Bitbucket Server REST API URL to talk to. 
Required.", + "type": "string" + }, + "basicAuth": { + "$ref": "#/definitions/v1alpha1BasicAuthBitbucketServer" + }, + "bearerToken": { + "$ref": "#/definitions/v1alpha1BearerTokenBitbucket" + }, + "caRef": { + "$ref": "#/definitions/v1alpha1ConfigMapKeyRef" + }, + "insecure": { + "type": "boolean", + "title": "Allow self-signed TLS / Certificates; default: false" + }, + "project": { + "description": "Project to scan. Required.", + "type": "string" + } + } + }, + "v1alpha1SCMProviderGeneratorFilter": { + "description": "SCMProviderGeneratorFilter is a single repository filter.\nIf multiple filter types are set on a single struct, they will be AND'd together. All filters must\npass for a repo to be included.", + "type": "object", + "properties": { + "branchMatch": { + "description": "A regex which must match the branch name.", + "type": "string" + }, + "labelMatch": { + "description": "A regex which must match at least one label.", + "type": "string" + }, + "pathsDoNotExist": { + "description": "An array of paths, all of which must not exist.", + "type": "array", + "items": { + "type": "string" + } + }, + "pathsExist": { + "description": "An array of paths, all of which must exist.", + "type": "array", + "items": { + "type": "string" + } + }, + "repositoryMatch": { + "description": "A regex for repo names.", + "type": "string" + } + } + }, + "v1alpha1SCMProviderGeneratorGitea": { + "description": "SCMProviderGeneratorGitea defines a connection info specific to Gitea.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The Gitea URL to talk to. For example https://gitea.mydomain.com/.", + "type": "string" + }, + "insecure": { + "type": "boolean", + "title": "Allow self-signed TLS / Certificates; default: false" + }, + "owner": { + "description": "Gitea organization or user to scan. 
Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1SCMProviderGeneratorGithub": { + "description": "SCMProviderGeneratorGithub defines connection info specific to GitHub.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The GitHub API URL to talk to. If blank, use https://api.github.com/.", + "type": "string" + }, + "appSecretName": { + "description": "AppSecretName is a reference to a GitHub App repo-creds secret.", + "type": "string" + }, + "organization": { + "description": "GitHub org to scan. Required.", + "type": "string" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + } + } + }, + "v1alpha1SCMProviderGeneratorGitlab": { + "description": "SCMProviderGeneratorGitlab defines connection info specific to Gitlab.", + "type": "object", + "properties": { + "allBranches": { + "description": "Scan all branches instead of just the default branch.", + "type": "boolean" + }, + "api": { + "description": "The Gitlab API URL to talk to.", + "type": "string" + }, + "caRef": { + "$ref": "#/definitions/v1alpha1ConfigMapKeyRef" + }, + "group": { + "description": "Gitlab group to scan. Required. You can use either the project id (recommended) or the full namespaced path.", + "type": "string" + }, + "includeSharedProjects": { + "type": "boolean", + "title": "When recursing through subgroups, also include shared Projects (true) or scan only the subgroups under same path (false). Defaults to \"true\"" + }, + "includeSubgroups": { + "type": "boolean", + "title": "Recurse through subgroups (true) or scan only the base group (false). 
Defaults to \"false\"" + }, + "insecure": { + "type": "boolean", + "title": "Skips validating the SCM provider's TLS certificate - useful for self-signed certificates.; default: false" + }, + "tokenRef": { + "$ref": "#/definitions/v1alpha1SecretRef" + }, + "topic": { + "description": "Filter repos list based on Gitlab Topic.", + "type": "string" + } + } + }, + "v1alpha1SecretRef": { + "description": "Utility struct for a reference to a secret key.", + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "secretName": { + "type": "string" + } + } + }, + "v1alpha1SignatureKey": { + "type": "object", + "title": "SignatureKey is the specification of a key required to verify commit signatures with", + "properties": { + "keyID": { + "type": "string", + "title": "The ID of the key in hexadecimal notation" + } + } + }, + "v1alpha1SourceHydrator": { + "description": "SourceHydrator specifies a dry \"don't repeat yourself\" source for manifests, a sync source from which to sync\nhydrated manifests, and an optional hydrateTo location to act as a \"staging\" area for hydrated manifests.", + "type": "object", + "properties": { + "drySource": { + "$ref": "#/definitions/v1alpha1DrySource" + }, + "hydrateTo": { + "$ref": "#/definitions/v1alpha1HydrateTo" + }, + "syncSource": { + "$ref": "#/definitions/v1alpha1SyncSource" + } + } + }, + "v1alpha1SourceHydratorStatus": { + "type": "object", + "title": "SourceHydratorStatus contains information about the current state of source hydration", + "properties": { + "currentOperation": { + "$ref": "#/definitions/v1alpha1HydrateOperation" + }, + "lastSuccessfulOperation": { + "$ref": "#/definitions/v1alpha1SuccessfulHydrateOperation" + } + } + }, + "v1alpha1SuccessfulHydrateOperation": { + "type": "object", + "title": "SuccessfulHydrateOperation contains information about the most recent successful hydrate operation", + "properties": { + "drySHA": { + "type": "string", + "title": "DrySHA holds the resolved revision (sha) 
of the dry source as of the most recent reconciliation" + }, + "hydratedSHA": { + "type": "string", + "title": "HydratedSHA holds the resolved revision (sha) of the hydrated source as of the most recent reconciliation" + }, + "sourceHydrator": { + "$ref": "#/definitions/v1alpha1SourceHydrator" + } + } + }, + "v1alpha1SyncOperation": { + "description": "SyncOperation contains details about a sync operation.", + "type": "object", + "properties": { + "autoHealAttemptsCount": { + "type": "integer", + "format": "int64", + "title": "SelfHealAttemptsCount contains the number of auto-heal attempts" + }, + "dryRun": { + "type": "boolean", + "title": "DryRun specifies to perform a `kubectl apply --dry-run` without actually performing the sync" + }, + "manifests": { + "type": "array", + "title": "Manifests is an optional field that overrides sync source with a local directory for development", + "items": { + "type": "string" + } + }, + "prune": { + "type": "boolean", + "title": "Prune specifies to delete resources from the cluster that are no longer tracked in git" + }, + "resources": { + "type": "array", + "title": "Resources describes which resources shall be part of the sync", + "items": { + "$ref": "#/definitions/v1alpha1SyncOperationResource" + } + }, + "revision": { + "description": "Revision is the revision (Git) or chart version (Helm) which to sync the application to\nIf omitted, will use the revision specified in app spec.", + "type": "string" + }, + "revisions": { + "description": "Revisions is the list of revision (Git) or chart version (Helm) which to sync each source in sources field for the application to\nIf omitted, will use the revision specified in app spec.", + "type": "array", + "items": { + "type": "string" + } + }, + "source": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + }, + "sources": { + "type": "array", + "title": "Sources overrides the source definition set in the application.\nThis is typically set in a Rollback operation and is nil 
during a Sync operation", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } + }, + "syncOptions": { + "type": "array", + "title": "SyncOptions provide per-sync sync-options, e.g. Validate=false", + "items": { + "type": "string" + } + }, + "syncStrategy": { + "$ref": "#/definitions/v1alpha1SyncStrategy" + } + } + }, + "v1alpha1SyncOperationResource": { + "description": "SyncOperationResource contains resources to sync.", + "type": "object", + "properties": { + "group": { + "type": "string" + }, + "kind": { + "type": "string" + }, + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + } + } + }, + "v1alpha1SyncOperationResult": { + "type": "object", + "title": "SyncOperationResult represent result of sync operation", + "properties": { + "managedNamespaceMetadata": { + "$ref": "#/definitions/v1alpha1ManagedNamespaceMetadata" + }, + "resources": { + "type": "array", + "title": "Resources contains a list of sync result items for each individual resource in a sync operation", + "items": { + "$ref": "#/definitions/v1alpha1ResourceResult" + } + }, + "revision": { + "type": "string", + "title": "Revision holds the revision this sync operation was performed to" + }, + "revisions": { + "type": "array", + "title": "Revisions holds the revision this sync operation was performed for respective indexed source in sources field", + "items": { + "type": "string" + } + }, + "source": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + }, + "sources": { + "type": "array", + "title": "Source records the application source information of the sync, used for comparing auto-sync", + "items": { + "$ref": "#/definitions/v1alpha1ApplicationSource" + } + } + } + }, + "v1alpha1SyncPolicy": { + "type": "object", + "title": "SyncPolicy controls when a sync will be performed in response to updates in git", + "properties": { + "automated": { + "$ref": "#/definitions/v1alpha1SyncPolicyAutomated" + }, + "managedNamespaceMetadata": { + "$ref": 
"#/definitions/v1alpha1ManagedNamespaceMetadata" + }, + "retry": { + "$ref": "#/definitions/v1alpha1RetryStrategy" + }, + "syncOptions": { + "type": "array", + "title": "Options allow you to specify whole app sync-options", + "items": { + "type": "string" + } + } + } + }, + "v1alpha1SyncPolicyAutomated": { + "type": "object", + "title": "SyncPolicyAutomated controls the behavior of an automated sync", + "properties": { + "allowEmpty": { + "type": "boolean", + "title": "AllowEmpty allows apps have zero live resources (default: false)" + }, + "prune": { + "type": "boolean", + "title": "Prune specifies whether to delete resources from the cluster that are not found in the sources anymore as part of automated sync (default: false)" + }, + "selfHeal": { + "type": "boolean", + "title": "SelfHeal specifies whether to revert resources back to their desired state upon modification in the cluster (default: false)" + } + } + }, + "v1alpha1SyncSource": { + "description": "SyncSource specifies a location from which hydrated manifests may be synced. RepoURL is assumed based on the\nassociated DrySource config in the SourceHydrator.", + "type": "object", + "properties": { + "path": { + "description": "Path is a directory path within the git repository where hydrated manifests should be committed to and synced\nfrom. 
If hydrateTo is set, this is just the path from which hydrated manifests will be synced.", + "type": "string" + }, + "targetBranch": { + "type": "string", + "title": "TargetBranch is the branch to which hydrated manifests should be committed" + } + } + }, + "v1alpha1SyncStatus": { + "type": "object", + "title": "SyncStatus contains information about the currently observed live and desired states of an application", + "properties": { + "comparedTo": { + "$ref": "#/definitions/v1alpha1ComparedTo" + }, + "revision": { + "type": "string", + "title": "Revision contains information about the revision the comparison has been performed to" + }, + "revisions": { + "type": "array", + "title": "Revisions contains information about the revisions of multiple sources the comparison has been performed to", + "items": { + "type": "string" + } + }, + "status": { + "type": "string", + "title": "Status is the sync state of the comparison" + } + } + }, + "v1alpha1SyncStrategy": { + "type": "object", + "title": "SyncStrategy controls the manner in which a sync is performed", + "properties": { + "apply": { + "$ref": "#/definitions/v1alpha1SyncStrategyApply" + }, + "hook": { + "$ref": "#/definitions/v1alpha1SyncStrategyHook" + } + } + }, + "v1alpha1SyncStrategyApply": { + "type": "object", + "title": "SyncStrategyApply uses `kubectl apply` to perform the apply", + "properties": { + "force": { + "description": "Force indicates whether or not to supply the --force flag to `kubectl apply`.\nThe --force flag deletes and re-create the resource, when PATCH encounters conflict and has\nretried for 5 times.", + "type": "boolean" + } + } + }, + "v1alpha1SyncStrategyHook": { + "description": "SyncStrategyHook will perform a sync using hooks annotations.\nIf no hook annotation is specified falls back to `kubectl apply`.", + "type": "object", + "properties": { + "syncStrategyApply": { + "$ref": "#/definitions/v1alpha1SyncStrategyApply" + } + } + }, + "v1alpha1SyncWindow": { + "type": "object", + 
"title": "SyncWindow contains the kind, time, duration and attributes that are used to assign the syncWindows to apps", + "properties": { + "applications": { + "type": "array", + "title": "Applications contains a list of applications that the window will apply to", + "items": { + "type": "string" + } + }, + "clusters": { + "type": "array", + "title": "Clusters contains a list of clusters that the window will apply to", + "items": { + "type": "string" + } + }, + "duration": { + "type": "string", + "title": "Duration is the amount of time the sync window will be open" + }, + "kind": { + "type": "string", + "title": "Kind defines if the window allows or blocks syncs" + }, + "manualSync": { + "type": "boolean", + "title": "ManualSync enables manual syncs when they would otherwise be blocked" + }, + "namespaces": { + "type": "array", + "title": "Namespaces contains a list of namespaces that the window will apply to", + "items": { + "type": "string" + } + }, + "schedule": { + "type": "string", + "title": "Schedule is the time the window will begin, specified in cron format" + }, + "timeZone": { + "type": "string", + "title": "TimeZone of the sync that will be applied to the schedule" + } + } + }, + "v1alpha1TLSClientConfig": { + "type": "object", + "title": "TLSClientConfig contains settings to enable transport layer security", + "properties": { + "caData": { + "type": "string", + "format": "byte", + "title": "CAData holds PEM-encoded bytes (typically read from a root certificates bundle).\nCAData takes precedence over CAFile" + }, + "certData": { + "type": "string", + "format": "byte", + "title": "CertData holds PEM-encoded bytes (typically read from a client certificate file).\nCertData takes precedence over CertFile" + }, + "insecure": { + "description": "Insecure specifies that the server should be accessed without verifying the TLS certificate. 
For testing only.", + "type": "boolean" + }, + "keyData": { + "type": "string", + "format": "byte", + "title": "KeyData holds PEM-encoded bytes (typically read from a client certificate key file).\nKeyData takes precedence over KeyFile" + }, + "serverName": { + "description": "ServerName is passed to the server for SNI and is used in the client to check server\ncertificates against. If ServerName is empty, the hostname used to contact the\nserver is used.", + "type": "string" + } + } + }, + "v1alpha1TagFilter": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "versionVersionMessage": { + "type": "object", + "title": "VersionMessage represents version of the Argo CD API server", + "properties": { + "BuildDate": { + "type": "string" + }, + "Compiler": { + "type": "string" + }, + "ExtraBuildInfo": { + "type": "string" + }, + "GitCommit": { + "type": "string" + }, + "GitTag": { + "type": "string" + }, + "GitTreeState": { + "type": "string" + }, + "GoVersion": { + "type": "string" + }, + "HelmVersion": { + "type": "string" + }, + "JsonnetVersion": { + "type": "string" + }, + "KubectlVersion": { + "type": "string" + }, + "KustomizeVersion": { + "type": "string" + }, + "Platform": { + "type": "string" + }, + "Version": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/common/common.go b/vendor/github.com/argoproj/argo-cd/v2/common/common.go index d9174d841..82e0d91f7 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/common/common.go +++ b/vendor/github.com/argoproj/argo-cd/v2/common/common.go @@ -26,6 +26,8 @@ const ( const ( // DefaultRepoServerAddr is the gRPC address of the Argo CD repo server DefaultRepoServerAddr = "argocd-repo-server:8081" + // DefaultCommitServerAddr is the gRPC address of the Argo CD commit server + DefaultCommitServerAddr = "argocd-commit-server:8086" // DefaultDexServerAddr is the HTTP address of the Dex OIDC server, which we run a 
reverse proxy against DefaultDexServerAddr = "argocd-dex-server:5556" // DefaultRedisAddr is the default redis address @@ -62,15 +64,19 @@ const ( DefaultPortArgoCDMetrics = 8082 DefaultPortArgoCDAPIServerMetrics = 8083 DefaultPortRepoServerMetrics = 8084 + DefaultPortCommitServer = 8086 + DefaultPortCommitServerMetrics = 8087 ) // DefaultAddressAPIServer for ArgoCD components const ( - DefaultAddressAdminDashboard = "localhost" - DefaultAddressAPIServer = "0.0.0.0" - DefaultAddressAPIServerMetrics = "0.0.0.0" - DefaultAddressRepoServer = "0.0.0.0" - DefaultAddressRepoServerMetrics = "0.0.0.0" + DefaultAddressAdminDashboard = "localhost" + DefaultAddressAPIServer = "0.0.0.0" + DefaultAddressAPIServerMetrics = "0.0.0.0" + DefaultAddressRepoServer = "0.0.0.0" + DefaultAddressRepoServerMetrics = "0.0.0.0" + DefaultAddressCommitServer = "0.0.0.0" + DefaultAddressCommitServerMetrics = "0.0.0.0" ) // Default paths on the pod's file system @@ -175,6 +181,10 @@ const ( LabelValueSecretTypeRepository = "repository" // LabelValueSecretTypeRepoCreds indicates a secret type of repository credentials LabelValueSecretTypeRepoCreds = "repo-creds" + // LabelValueSecretTypeRepositoryWrite indicates a secret type of repository credentials for writing + LabelValueSecretTypeRepositoryWrite = "repository-write" + // LabelValueSecretTypeSCMCreds indicates a secret type of SCM credentials + LabelValueSecretTypeSCMCreds = "scm-creds" // AnnotationKeyAppInstance is the Argo CD application name is used as the instance name AnnotationKeyAppInstance = "argocd.argoproj.io/tracking-id" @@ -183,6 +193,10 @@ const ( // AnnotationCompareOptions is a comma-separated list of options for comparison AnnotationCompareOptions = "argocd.argoproj.io/compare-options" + // AnnotationIgnoreHealthCheck when set on an Application's immediate child indicates that its health check + // can be disregarded. 
+ AnnotationIgnoreHealthCheck = "argocd.argoproj.io/ignore-healthcheck" + // AnnotationKeyManagedBy is annotation name which indicates that k8s resource is managed by an application. AnnotationKeyManagedBy = "managed-by" // AnnotationValueManagedByArgoCD is a 'managed-by' annotation value for resources managed by Argo CD @@ -255,6 +269,8 @@ const ( EnvHelmIndexCacheDuration = "ARGOCD_HELM_INDEX_CACHE_DURATION" // EnvAppConfigPath allows to override the configuration path for repo server EnvAppConfigPath = "ARGOCD_APP_CONF_PATH" + // EnvAuthToken is the environment variable name for the auth token used by the CLI + EnvAuthToken = "ARGOCD_AUTH_TOKEN" // EnvLogFormat log format that is defined by `--logformat` option EnvLogFormat = "ARGOCD_LOG_FORMAT" // EnvLogLevel log level that is defined by `--loglevel` option @@ -314,7 +330,10 @@ const ( // Constants used by util/clusterauth package const ( ClusterAuthRequestTimeout = 10 * time.Second - BearerTokenTimeout = 30 * time.Second +) + +const ( + BearerTokenTimeout = 30 * time.Second ) const ( @@ -424,8 +443,10 @@ var PermissionDeniedAPIError = status.Error(codes.PermissionDenied, "permission // Redis password consts const ( - DefaultRedisInitialPasswordSecretName = "argocd-redis" - DefaultRedisInitialPasswordKey = "auth" + // RedisInitialCredentials is the name for the argocd kubernetes secret which will have the redis password + RedisInitialCredentials = "argocd-redis" + // RedisInitialCredentialsKey is the key for the argocd kubernetes secret that maps to the redis password + RedisInitialCredentialsKey = "auth" ) /* @@ -434,17 +455,17 @@ SetOptionalRedisPasswordFromKubeConfig sets the optional Redis password if it ex We specify kubeClient as kubernetes.Interface to allow for mocking in tests, but this should be treated as a kubernetes.Clientset param. 
*/ func SetOptionalRedisPasswordFromKubeConfig(ctx context.Context, kubeClient kubernetes.Interface, namespace string, redisOptions *redis.Options) error { - secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, DefaultRedisInitialPasswordSecretName, v1.GetOptions{}) + secret, err := kubeClient.CoreV1().Secrets(namespace).Get(ctx, RedisInitialCredentials, v1.GetOptions{}) if err != nil { - return fmt.Errorf("failed to get secret %s/%s: %w", namespace, DefaultRedisInitialPasswordSecretName, err) + return fmt.Errorf("failed to get secret %s/%s: %w", namespace, RedisInitialCredentials, err) } if secret == nil { - return fmt.Errorf("failed to get secret %s/%s: secret is nil", namespace, DefaultRedisInitialPasswordSecretName) + return fmt.Errorf("failed to get secret %s/%s: secret is nil", namespace, RedisInitialCredentials) } - _, ok := secret.Data[DefaultRedisInitialPasswordKey] + _, ok := secret.Data[RedisInitialCredentialsKey] if !ok { - return fmt.Errorf("secret %s/%s does not contain key %s", namespace, DefaultRedisInitialPasswordSecretName, DefaultRedisInitialPasswordKey) + return fmt.Errorf("secret %s/%s does not contain key %s", namespace, RedisInitialCredentials, RedisInitialCredentialsKey) } - redisOptions.Password = string(secret.Data[DefaultRedisInitialPasswordKey]) + redisOptions.Password = string(secret.Data[RedisInitialCredentialsKey]) return nil } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/app_project_types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/app_project_types.go index 903d8aab2..436e57854 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/app_project_types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/app_project_types.go @@ -483,7 +483,6 @@ func (proj AppProject) IsDestinationPermitted(dst ApplicationDestination, projec func (proj AppProject) isDestinationMatched(dst ApplicationDestination) bool { 
anyDestinationMatched := false - noDenyDestinationsMatched := true for _, item := range proj.Spec.Destinations { dstNameMatched := dst.Name != "" && globMatch(item.Name, dst.Name, true) @@ -493,12 +492,14 @@ func (proj AppProject) isDestinationMatched(dst ApplicationDestination) bool { matched := (dstServerMatched || dstNameMatched) && dstNamespaceMatched if matched { anyDestinationMatched = true - } else if ((!dstNameMatched && isDenyPattern(item.Name)) || (!dstServerMatched && isDenyPattern(item.Server))) || (!dstNamespaceMatched && isDenyPattern(item.Namespace)) { - noDenyDestinationsMatched = false + } else if (!dstNameMatched && isDenyPattern(item.Name)) || (!dstServerMatched && isDenyPattern(item.Server)) && dstNamespaceMatched { + return false + } else if !dstNamespaceMatched && isDenyPattern(item.Namespace) && dstServerMatched { + return false } } - return anyDestinationMatched && noDenyDestinationsMatched + return anyDestinationMatched } func isDenyPattern(pattern string) bool { diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/application_annotations.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/application_annotations.go index 2c5e4ac3f..6395b5dbe 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/application_annotations.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/application_annotations.go @@ -4,6 +4,8 @@ const ( // AnnotationKeyRefresh is the annotation key which indicates that app needs to be refreshed. Removed by application controller after app is refreshed. // Might take values 'normal'/'hard'. Value 'hard' means manifest cache and target cluster state cache should be invalidated before refresh. AnnotationKeyRefresh string = "argocd.argoproj.io/refresh" + // AnnotationKeyHydrate is the annotation key which indicates that app needs to be hydrated. Removed by application controller after app is hydrated. 
+ AnnotationKeyHydrate string = "argocd.argoproj.io/hydrate" // AnnotationKeyManifestGeneratePaths is an annotation that contains a list of semicolon-separated paths in the // manifests repository that affects the manifest generation. Paths might be either relative or absolute. The diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go index d4446130c..16f9eeecd 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/applicationset_types.go @@ -379,6 +379,9 @@ type ClusterGenerator struct { // Values contains key/value pairs which are passed directly as parameters to the template Values map[string]string `json:"values,omitempty" protobuf:"bytes,3,name=values"` + + // returns the clusters a single 'clusters' value in the template + FlatList bool `json:"flatList,omitempty" protobuf:"bytes,4,name=flatList"` } // DuckType defines a generator to match against clusters registered with ArgoCD. 
diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go index 9266e7e40..bdb236820 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.pb.go @@ -1665,10 +1665,38 @@ func (m *ConnectionState) XXX_DiscardUnknown() { var xxx_messageInfo_ConnectionState proto.InternalMessageInfo +func (m *DrySource) Reset() { *m = DrySource{} } +func (*DrySource) ProtoMessage() {} +func (*DrySource) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{58} +} +func (m *DrySource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DrySource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DrySource) XXX_Merge(src proto.Message) { + xxx_messageInfo_DrySource.Merge(m, src) +} +func (m *DrySource) XXX_Size() int { + return m.Size() +} +func (m *DrySource) XXX_DiscardUnknown() { + xxx_messageInfo_DrySource.DiscardUnknown(m) +} + +var xxx_messageInfo_DrySource proto.InternalMessageInfo + func (m *DuckTypeGenerator) Reset() { *m = DuckTypeGenerator{} } func (*DuckTypeGenerator) ProtoMessage() {} func (*DuckTypeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{58} + return fileDescriptor_030104ce3b95bcac, []int{59} } func (m *DuckTypeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1696,7 +1724,7 @@ var xxx_messageInfo_DuckTypeGenerator proto.InternalMessageInfo func (m *EnvEntry) Reset() { *m = EnvEntry{} } func (*EnvEntry) ProtoMessage() {} func (*EnvEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{59} + return fileDescriptor_030104ce3b95bcac, []int{60} } func (m 
*EnvEntry) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1724,7 +1752,7 @@ var xxx_messageInfo_EnvEntry proto.InternalMessageInfo func (m *ErrApplicationNotAllowedToUseProject) Reset() { *m = ErrApplicationNotAllowedToUseProject{} } func (*ErrApplicationNotAllowedToUseProject) ProtoMessage() {} func (*ErrApplicationNotAllowedToUseProject) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{60} + return fileDescriptor_030104ce3b95bcac, []int{61} } func (m *ErrApplicationNotAllowedToUseProject) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1752,7 +1780,7 @@ var xxx_messageInfo_ErrApplicationNotAllowedToUseProject proto.InternalMessageIn func (m *ExecProviderConfig) Reset() { *m = ExecProviderConfig{} } func (*ExecProviderConfig) ProtoMessage() {} func (*ExecProviderConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{61} + return fileDescriptor_030104ce3b95bcac, []int{62} } func (m *ExecProviderConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1780,7 +1808,7 @@ var xxx_messageInfo_ExecProviderConfig proto.InternalMessageInfo func (m *GitDirectoryGeneratorItem) Reset() { *m = GitDirectoryGeneratorItem{} } func (*GitDirectoryGeneratorItem) ProtoMessage() {} func (*GitDirectoryGeneratorItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{62} + return fileDescriptor_030104ce3b95bcac, []int{63} } func (m *GitDirectoryGeneratorItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1808,7 +1836,7 @@ var xxx_messageInfo_GitDirectoryGeneratorItem proto.InternalMessageInfo func (m *GitFileGeneratorItem) Reset() { *m = GitFileGeneratorItem{} } func (*GitFileGeneratorItem) ProtoMessage() {} func (*GitFileGeneratorItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{63} + return fileDescriptor_030104ce3b95bcac, []int{64} } func (m *GitFileGeneratorItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-1836,7 +1864,7 @@ var xxx_messageInfo_GitFileGeneratorItem proto.InternalMessageInfo func (m *GitGenerator) Reset() { *m = GitGenerator{} } func (*GitGenerator) ProtoMessage() {} func (*GitGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{64} + return fileDescriptor_030104ce3b95bcac, []int{65} } func (m *GitGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1864,7 +1892,7 @@ var xxx_messageInfo_GitGenerator proto.InternalMessageInfo func (m *GnuPGPublicKey) Reset() { *m = GnuPGPublicKey{} } func (*GnuPGPublicKey) ProtoMessage() {} func (*GnuPGPublicKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{65} + return fileDescriptor_030104ce3b95bcac, []int{66} } func (m *GnuPGPublicKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1892,7 +1920,7 @@ var xxx_messageInfo_GnuPGPublicKey proto.InternalMessageInfo func (m *GnuPGPublicKeyList) Reset() { *m = GnuPGPublicKeyList{} } func (*GnuPGPublicKeyList) ProtoMessage() {} func (*GnuPGPublicKeyList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{66} + return fileDescriptor_030104ce3b95bcac, []int{67} } func (m *GnuPGPublicKeyList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1920,7 +1948,7 @@ var xxx_messageInfo_GnuPGPublicKeyList proto.InternalMessageInfo func (m *HealthStatus) Reset() { *m = HealthStatus{} } func (*HealthStatus) ProtoMessage() {} func (*HealthStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{67} + return fileDescriptor_030104ce3b95bcac, []int{68} } func (m *HealthStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1948,7 +1976,7 @@ var xxx_messageInfo_HealthStatus proto.InternalMessageInfo func (m *HelmFileParameter) Reset() { *m = HelmFileParameter{} } func (*HelmFileParameter) ProtoMessage() {} func (*HelmFileParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{68} + 
return fileDescriptor_030104ce3b95bcac, []int{69} } func (m *HelmFileParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1976,7 +2004,7 @@ var xxx_messageInfo_HelmFileParameter proto.InternalMessageInfo func (m *HelmOptions) Reset() { *m = HelmOptions{} } func (*HelmOptions) ProtoMessage() {} func (*HelmOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{69} + return fileDescriptor_030104ce3b95bcac, []int{70} } func (m *HelmOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2004,7 +2032,7 @@ var xxx_messageInfo_HelmOptions proto.InternalMessageInfo func (m *HelmParameter) Reset() { *m = HelmParameter{} } func (*HelmParameter) ProtoMessage() {} func (*HelmParameter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{70} + return fileDescriptor_030104ce3b95bcac, []int{71} } func (m *HelmParameter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2032,7 +2060,7 @@ var xxx_messageInfo_HelmParameter proto.InternalMessageInfo func (m *HostInfo) Reset() { *m = HostInfo{} } func (*HostInfo) ProtoMessage() {} func (*HostInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{71} + return fileDescriptor_030104ce3b95bcac, []int{72} } func (m *HostInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2060,7 +2088,7 @@ var xxx_messageInfo_HostInfo proto.InternalMessageInfo func (m *HostResourceInfo) Reset() { *m = HostResourceInfo{} } func (*HostResourceInfo) ProtoMessage() {} func (*HostResourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{72} + return fileDescriptor_030104ce3b95bcac, []int{73} } func (m *HostResourceInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2085,10 +2113,66 @@ func (m *HostResourceInfo) XXX_DiscardUnknown() { var xxx_messageInfo_HostResourceInfo proto.InternalMessageInfo +func (m *HydrateOperation) Reset() { *m = HydrateOperation{} } +func 
(*HydrateOperation) ProtoMessage() {} +func (*HydrateOperation) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{74} +} +func (m *HydrateOperation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HydrateOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HydrateOperation) XXX_Merge(src proto.Message) { + xxx_messageInfo_HydrateOperation.Merge(m, src) +} +func (m *HydrateOperation) XXX_Size() int { + return m.Size() +} +func (m *HydrateOperation) XXX_DiscardUnknown() { + xxx_messageInfo_HydrateOperation.DiscardUnknown(m) +} + +var xxx_messageInfo_HydrateOperation proto.InternalMessageInfo + +func (m *HydrateTo) Reset() { *m = HydrateTo{} } +func (*HydrateTo) ProtoMessage() {} +func (*HydrateTo) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{75} +} +func (m *HydrateTo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HydrateTo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HydrateTo) XXX_Merge(src proto.Message) { + xxx_messageInfo_HydrateTo.Merge(m, src) +} +func (m *HydrateTo) XXX_Size() int { + return m.Size() +} +func (m *HydrateTo) XXX_DiscardUnknown() { + xxx_messageInfo_HydrateTo.DiscardUnknown(m) +} + +var xxx_messageInfo_HydrateTo proto.InternalMessageInfo + func (m *Info) Reset() { *m = Info{} } func (*Info) ProtoMessage() {} func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{73} + return fileDescriptor_030104ce3b95bcac, []int{76} } func (m *Info) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2116,7 +2200,7 @@ var xxx_messageInfo_Info proto.InternalMessageInfo func (m *InfoItem) Reset() { *m = InfoItem{} } func 
(*InfoItem) ProtoMessage() {} func (*InfoItem) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{74} + return fileDescriptor_030104ce3b95bcac, []int{77} } func (m *InfoItem) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2144,7 +2228,7 @@ var xxx_messageInfo_InfoItem proto.InternalMessageInfo func (m *JWTToken) Reset() { *m = JWTToken{} } func (*JWTToken) ProtoMessage() {} func (*JWTToken) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{75} + return fileDescriptor_030104ce3b95bcac, []int{78} } func (m *JWTToken) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2172,7 +2256,7 @@ var xxx_messageInfo_JWTToken proto.InternalMessageInfo func (m *JWTTokens) Reset() { *m = JWTTokens{} } func (*JWTTokens) ProtoMessage() {} func (*JWTTokens) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{76} + return fileDescriptor_030104ce3b95bcac, []int{79} } func (m *JWTTokens) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2200,7 +2284,7 @@ var xxx_messageInfo_JWTTokens proto.InternalMessageInfo func (m *JsonnetVar) Reset() { *m = JsonnetVar{} } func (*JsonnetVar) ProtoMessage() {} func (*JsonnetVar) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{77} + return fileDescriptor_030104ce3b95bcac, []int{80} } func (m *JsonnetVar) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2228,7 +2312,7 @@ var xxx_messageInfo_JsonnetVar proto.InternalMessageInfo func (m *KnownTypeField) Reset() { *m = KnownTypeField{} } func (*KnownTypeField) ProtoMessage() {} func (*KnownTypeField) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{78} + return fileDescriptor_030104ce3b95bcac, []int{81} } func (m *KnownTypeField) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2256,7 +2340,7 @@ var xxx_messageInfo_KnownTypeField proto.InternalMessageInfo func (m *KustomizeGvk) Reset() { *m = KustomizeGvk{} } func 
(*KustomizeGvk) ProtoMessage() {} func (*KustomizeGvk) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{79} + return fileDescriptor_030104ce3b95bcac, []int{82} } func (m *KustomizeGvk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2284,7 +2368,7 @@ var xxx_messageInfo_KustomizeGvk proto.InternalMessageInfo func (m *KustomizeOptions) Reset() { *m = KustomizeOptions{} } func (*KustomizeOptions) ProtoMessage() {} func (*KustomizeOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{80} + return fileDescriptor_030104ce3b95bcac, []int{83} } func (m *KustomizeOptions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2312,7 +2396,7 @@ var xxx_messageInfo_KustomizeOptions proto.InternalMessageInfo func (m *KustomizePatch) Reset() { *m = KustomizePatch{} } func (*KustomizePatch) ProtoMessage() {} func (*KustomizePatch) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{81} + return fileDescriptor_030104ce3b95bcac, []int{84} } func (m *KustomizePatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2340,7 +2424,7 @@ var xxx_messageInfo_KustomizePatch proto.InternalMessageInfo func (m *KustomizeReplica) Reset() { *m = KustomizeReplica{} } func (*KustomizeReplica) ProtoMessage() {} func (*KustomizeReplica) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{82} + return fileDescriptor_030104ce3b95bcac, []int{85} } func (m *KustomizeReplica) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2368,7 +2452,7 @@ var xxx_messageInfo_KustomizeReplica proto.InternalMessageInfo func (m *KustomizeResId) Reset() { *m = KustomizeResId{} } func (*KustomizeResId) ProtoMessage() {} func (*KustomizeResId) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{83} + return fileDescriptor_030104ce3b95bcac, []int{86} } func (m *KustomizeResId) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2396,7 +2480,7 
@@ var xxx_messageInfo_KustomizeResId proto.InternalMessageInfo func (m *KustomizeSelector) Reset() { *m = KustomizeSelector{} } func (*KustomizeSelector) ProtoMessage() {} func (*KustomizeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{84} + return fileDescriptor_030104ce3b95bcac, []int{87} } func (m *KustomizeSelector) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2424,7 +2508,7 @@ var xxx_messageInfo_KustomizeSelector proto.InternalMessageInfo func (m *ListGenerator) Reset() { *m = ListGenerator{} } func (*ListGenerator) ProtoMessage() {} func (*ListGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{85} + return fileDescriptor_030104ce3b95bcac, []int{88} } func (m *ListGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2452,7 +2536,7 @@ var xxx_messageInfo_ListGenerator proto.InternalMessageInfo func (m *ManagedNamespaceMetadata) Reset() { *m = ManagedNamespaceMetadata{} } func (*ManagedNamespaceMetadata) ProtoMessage() {} func (*ManagedNamespaceMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{86} + return fileDescriptor_030104ce3b95bcac, []int{89} } func (m *ManagedNamespaceMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2480,7 +2564,7 @@ var xxx_messageInfo_ManagedNamespaceMetadata proto.InternalMessageInfo func (m *MatrixGenerator) Reset() { *m = MatrixGenerator{} } func (*MatrixGenerator) ProtoMessage() {} func (*MatrixGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{87} + return fileDescriptor_030104ce3b95bcac, []int{90} } func (m *MatrixGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2508,7 +2592,7 @@ var xxx_messageInfo_MatrixGenerator proto.InternalMessageInfo func (m *MergeGenerator) Reset() { *m = MergeGenerator{} } func (*MergeGenerator) ProtoMessage() {} func (*MergeGenerator) Descriptor() ([]byte, []int) { - return 
fileDescriptor_030104ce3b95bcac, []int{88} + return fileDescriptor_030104ce3b95bcac, []int{91} } func (m *MergeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2536,7 +2620,7 @@ var xxx_messageInfo_MergeGenerator proto.InternalMessageInfo func (m *NestedMatrixGenerator) Reset() { *m = NestedMatrixGenerator{} } func (*NestedMatrixGenerator) ProtoMessage() {} func (*NestedMatrixGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{89} + return fileDescriptor_030104ce3b95bcac, []int{92} } func (m *NestedMatrixGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2564,7 +2648,7 @@ var xxx_messageInfo_NestedMatrixGenerator proto.InternalMessageInfo func (m *NestedMergeGenerator) Reset() { *m = NestedMergeGenerator{} } func (*NestedMergeGenerator) ProtoMessage() {} func (*NestedMergeGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{90} + return fileDescriptor_030104ce3b95bcac, []int{93} } func (m *NestedMergeGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2592,7 +2676,7 @@ var xxx_messageInfo_NestedMergeGenerator proto.InternalMessageInfo func (m *Operation) Reset() { *m = Operation{} } func (*Operation) ProtoMessage() {} func (*Operation) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{91} + return fileDescriptor_030104ce3b95bcac, []int{94} } func (m *Operation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2620,7 +2704,7 @@ var xxx_messageInfo_Operation proto.InternalMessageInfo func (m *OperationInitiator) Reset() { *m = OperationInitiator{} } func (*OperationInitiator) ProtoMessage() {} func (*OperationInitiator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{92} + return fileDescriptor_030104ce3b95bcac, []int{95} } func (m *OperationInitiator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2648,7 +2732,7 @@ var xxx_messageInfo_OperationInitiator 
proto.InternalMessageInfo func (m *OperationState) Reset() { *m = OperationState{} } func (*OperationState) ProtoMessage() {} func (*OperationState) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{93} + return fileDescriptor_030104ce3b95bcac, []int{96} } func (m *OperationState) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2676,7 +2760,7 @@ var xxx_messageInfo_OperationState proto.InternalMessageInfo func (m *OptionalArray) Reset() { *m = OptionalArray{} } func (*OptionalArray) ProtoMessage() {} func (*OptionalArray) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{94} + return fileDescriptor_030104ce3b95bcac, []int{97} } func (m *OptionalArray) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2704,7 +2788,7 @@ var xxx_messageInfo_OptionalArray proto.InternalMessageInfo func (m *OptionalMap) Reset() { *m = OptionalMap{} } func (*OptionalMap) ProtoMessage() {} func (*OptionalMap) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{95} + return fileDescriptor_030104ce3b95bcac, []int{98} } func (m *OptionalMap) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2732,7 +2816,7 @@ var xxx_messageInfo_OptionalMap proto.InternalMessageInfo func (m *OrphanedResourceKey) Reset() { *m = OrphanedResourceKey{} } func (*OrphanedResourceKey) ProtoMessage() {} func (*OrphanedResourceKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{96} + return fileDescriptor_030104ce3b95bcac, []int{99} } func (m *OrphanedResourceKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2760,7 +2844,7 @@ var xxx_messageInfo_OrphanedResourceKey proto.InternalMessageInfo func (m *OrphanedResourcesMonitorSettings) Reset() { *m = OrphanedResourcesMonitorSettings{} } func (*OrphanedResourcesMonitorSettings) ProtoMessage() {} func (*OrphanedResourcesMonitorSettings) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{97} + 
return fileDescriptor_030104ce3b95bcac, []int{100} } func (m *OrphanedResourcesMonitorSettings) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2788,7 +2872,7 @@ var xxx_messageInfo_OrphanedResourcesMonitorSettings proto.InternalMessageInfo func (m *OverrideIgnoreDiff) Reset() { *m = OverrideIgnoreDiff{} } func (*OverrideIgnoreDiff) ProtoMessage() {} func (*OverrideIgnoreDiff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{98} + return fileDescriptor_030104ce3b95bcac, []int{101} } func (m *OverrideIgnoreDiff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2816,7 +2900,7 @@ var xxx_messageInfo_OverrideIgnoreDiff proto.InternalMessageInfo func (m *PluginConfigMapRef) Reset() { *m = PluginConfigMapRef{} } func (*PluginConfigMapRef) ProtoMessage() {} func (*PluginConfigMapRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{99} + return fileDescriptor_030104ce3b95bcac, []int{102} } func (m *PluginConfigMapRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2844,7 +2928,7 @@ var xxx_messageInfo_PluginConfigMapRef proto.InternalMessageInfo func (m *PluginGenerator) Reset() { *m = PluginGenerator{} } func (*PluginGenerator) ProtoMessage() {} func (*PluginGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{100} + return fileDescriptor_030104ce3b95bcac, []int{103} } func (m *PluginGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2872,7 +2956,7 @@ var xxx_messageInfo_PluginGenerator proto.InternalMessageInfo func (m *PluginInput) Reset() { *m = PluginInput{} } func (*PluginInput) ProtoMessage() {} func (*PluginInput) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{101} + return fileDescriptor_030104ce3b95bcac, []int{104} } func (m *PluginInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2900,7 +2984,7 @@ var xxx_messageInfo_PluginInput proto.InternalMessageInfo func (m 
*ProjectRole) Reset() { *m = ProjectRole{} } func (*ProjectRole) ProtoMessage() {} func (*ProjectRole) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{102} + return fileDescriptor_030104ce3b95bcac, []int{105} } func (m *ProjectRole) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2928,7 +3012,7 @@ var xxx_messageInfo_ProjectRole proto.InternalMessageInfo func (m *PullRequestGenerator) Reset() { *m = PullRequestGenerator{} } func (*PullRequestGenerator) ProtoMessage() {} func (*PullRequestGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{103} + return fileDescriptor_030104ce3b95bcac, []int{106} } func (m *PullRequestGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2956,7 +3040,7 @@ var xxx_messageInfo_PullRequestGenerator proto.InternalMessageInfo func (m *PullRequestGeneratorAzureDevOps) Reset() { *m = PullRequestGeneratorAzureDevOps{} } func (*PullRequestGeneratorAzureDevOps) ProtoMessage() {} func (*PullRequestGeneratorAzureDevOps) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{104} + return fileDescriptor_030104ce3b95bcac, []int{107} } func (m *PullRequestGeneratorAzureDevOps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2984,7 +3068,7 @@ var xxx_messageInfo_PullRequestGeneratorAzureDevOps proto.InternalMessageInfo func (m *PullRequestGeneratorBitbucket) Reset() { *m = PullRequestGeneratorBitbucket{} } func (*PullRequestGeneratorBitbucket) ProtoMessage() {} func (*PullRequestGeneratorBitbucket) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{105} + return fileDescriptor_030104ce3b95bcac, []int{108} } func (m *PullRequestGeneratorBitbucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3012,7 +3096,7 @@ var xxx_messageInfo_PullRequestGeneratorBitbucket proto.InternalMessageInfo func (m *PullRequestGeneratorBitbucketServer) Reset() { *m = PullRequestGeneratorBitbucketServer{} } 
func (*PullRequestGeneratorBitbucketServer) ProtoMessage() {} func (*PullRequestGeneratorBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{106} + return fileDescriptor_030104ce3b95bcac, []int{109} } func (m *PullRequestGeneratorBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3040,7 +3124,7 @@ var xxx_messageInfo_PullRequestGeneratorBitbucketServer proto.InternalMessageInf func (m *PullRequestGeneratorFilter) Reset() { *m = PullRequestGeneratorFilter{} } func (*PullRequestGeneratorFilter) ProtoMessage() {} func (*PullRequestGeneratorFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{107} + return fileDescriptor_030104ce3b95bcac, []int{110} } func (m *PullRequestGeneratorFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3068,7 +3152,7 @@ var xxx_messageInfo_PullRequestGeneratorFilter proto.InternalMessageInfo func (m *PullRequestGeneratorGitLab) Reset() { *m = PullRequestGeneratorGitLab{} } func (*PullRequestGeneratorGitLab) ProtoMessage() {} func (*PullRequestGeneratorGitLab) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{108} + return fileDescriptor_030104ce3b95bcac, []int{111} } func (m *PullRequestGeneratorGitLab) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3096,7 +3180,7 @@ var xxx_messageInfo_PullRequestGeneratorGitLab proto.InternalMessageInfo func (m *PullRequestGeneratorGitea) Reset() { *m = PullRequestGeneratorGitea{} } func (*PullRequestGeneratorGitea) ProtoMessage() {} func (*PullRequestGeneratorGitea) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{109} + return fileDescriptor_030104ce3b95bcac, []int{112} } func (m *PullRequestGeneratorGitea) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3124,7 +3208,7 @@ var xxx_messageInfo_PullRequestGeneratorGitea proto.InternalMessageInfo func (m *PullRequestGeneratorGithub) Reset() { *m = 
PullRequestGeneratorGithub{} } func (*PullRequestGeneratorGithub) ProtoMessage() {} func (*PullRequestGeneratorGithub) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{110} + return fileDescriptor_030104ce3b95bcac, []int{113} } func (m *PullRequestGeneratorGithub) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3152,7 +3236,7 @@ var xxx_messageInfo_PullRequestGeneratorGithub proto.InternalMessageInfo func (m *RefTarget) Reset() { *m = RefTarget{} } func (*RefTarget) ProtoMessage() {} func (*RefTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{111} + return fileDescriptor_030104ce3b95bcac, []int{114} } func (m *RefTarget) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3180,7 +3264,7 @@ var xxx_messageInfo_RefTarget proto.InternalMessageInfo func (m *RepoCreds) Reset() { *m = RepoCreds{} } func (*RepoCreds) ProtoMessage() {} func (*RepoCreds) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{112} + return fileDescriptor_030104ce3b95bcac, []int{115} } func (m *RepoCreds) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3208,7 +3292,7 @@ var xxx_messageInfo_RepoCreds proto.InternalMessageInfo func (m *RepoCredsList) Reset() { *m = RepoCredsList{} } func (*RepoCredsList) ProtoMessage() {} func (*RepoCredsList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{113} + return fileDescriptor_030104ce3b95bcac, []int{116} } func (m *RepoCredsList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3236,7 +3320,7 @@ var xxx_messageInfo_RepoCredsList proto.InternalMessageInfo func (m *Repository) Reset() { *m = Repository{} } func (*Repository) ProtoMessage() {} func (*Repository) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{114} + return fileDescriptor_030104ce3b95bcac, []int{117} } func (m *Repository) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3264,7 +3348,7 @@ var 
xxx_messageInfo_Repository proto.InternalMessageInfo func (m *RepositoryCertificate) Reset() { *m = RepositoryCertificate{} } func (*RepositoryCertificate) ProtoMessage() {} func (*RepositoryCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{115} + return fileDescriptor_030104ce3b95bcac, []int{118} } func (m *RepositoryCertificate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3292,7 +3376,7 @@ var xxx_messageInfo_RepositoryCertificate proto.InternalMessageInfo func (m *RepositoryCertificateList) Reset() { *m = RepositoryCertificateList{} } func (*RepositoryCertificateList) ProtoMessage() {} func (*RepositoryCertificateList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{116} + return fileDescriptor_030104ce3b95bcac, []int{119} } func (m *RepositoryCertificateList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3320,7 +3404,7 @@ var xxx_messageInfo_RepositoryCertificateList proto.InternalMessageInfo func (m *RepositoryList) Reset() { *m = RepositoryList{} } func (*RepositoryList) ProtoMessage() {} func (*RepositoryList) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{117} + return fileDescriptor_030104ce3b95bcac, []int{120} } func (m *RepositoryList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3348,7 +3432,7 @@ var xxx_messageInfo_RepositoryList proto.InternalMessageInfo func (m *ResourceAction) Reset() { *m = ResourceAction{} } func (*ResourceAction) ProtoMessage() {} func (*ResourceAction) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{118} + return fileDescriptor_030104ce3b95bcac, []int{121} } func (m *ResourceAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3376,7 +3460,7 @@ var xxx_messageInfo_ResourceAction proto.InternalMessageInfo func (m *ResourceActionDefinition) Reset() { *m = ResourceActionDefinition{} } func (*ResourceActionDefinition) ProtoMessage() {} func 
(*ResourceActionDefinition) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{119} + return fileDescriptor_030104ce3b95bcac, []int{122} } func (m *ResourceActionDefinition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3404,7 +3488,7 @@ var xxx_messageInfo_ResourceActionDefinition proto.InternalMessageInfo func (m *ResourceActionParam) Reset() { *m = ResourceActionParam{} } func (*ResourceActionParam) ProtoMessage() {} func (*ResourceActionParam) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{120} + return fileDescriptor_030104ce3b95bcac, []int{123} } func (m *ResourceActionParam) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3432,7 +3516,7 @@ var xxx_messageInfo_ResourceActionParam proto.InternalMessageInfo func (m *ResourceActions) Reset() { *m = ResourceActions{} } func (*ResourceActions) ProtoMessage() {} func (*ResourceActions) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{121} + return fileDescriptor_030104ce3b95bcac, []int{124} } func (m *ResourceActions) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3460,7 +3544,7 @@ var xxx_messageInfo_ResourceActions proto.InternalMessageInfo func (m *ResourceDiff) Reset() { *m = ResourceDiff{} } func (*ResourceDiff) ProtoMessage() {} func (*ResourceDiff) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{122} + return fileDescriptor_030104ce3b95bcac, []int{125} } func (m *ResourceDiff) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3488,7 +3572,7 @@ var xxx_messageInfo_ResourceDiff proto.InternalMessageInfo func (m *ResourceIgnoreDifferences) Reset() { *m = ResourceIgnoreDifferences{} } func (*ResourceIgnoreDifferences) ProtoMessage() {} func (*ResourceIgnoreDifferences) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{123} + return fileDescriptor_030104ce3b95bcac, []int{126} } func (m *ResourceIgnoreDifferences) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3516,7 +3600,7 @@ var xxx_messageInfo_ResourceIgnoreDifferences proto.InternalMessageInfo func (m *ResourceNetworkingInfo) Reset() { *m = ResourceNetworkingInfo{} } func (*ResourceNetworkingInfo) ProtoMessage() {} func (*ResourceNetworkingInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{124} + return fileDescriptor_030104ce3b95bcac, []int{127} } func (m *ResourceNetworkingInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3544,7 +3628,7 @@ var xxx_messageInfo_ResourceNetworkingInfo proto.InternalMessageInfo func (m *ResourceNode) Reset() { *m = ResourceNode{} } func (*ResourceNode) ProtoMessage() {} func (*ResourceNode) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{125} + return fileDescriptor_030104ce3b95bcac, []int{128} } func (m *ResourceNode) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3572,7 +3656,7 @@ var xxx_messageInfo_ResourceNode proto.InternalMessageInfo func (m *ResourceOverride) Reset() { *m = ResourceOverride{} } func (*ResourceOverride) ProtoMessage() {} func (*ResourceOverride) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{126} + return fileDescriptor_030104ce3b95bcac, []int{129} } func (m *ResourceOverride) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3600,7 +3684,7 @@ var xxx_messageInfo_ResourceOverride proto.InternalMessageInfo func (m *ResourceRef) Reset() { *m = ResourceRef{} } func (*ResourceRef) ProtoMessage() {} func (*ResourceRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{127} + return fileDescriptor_030104ce3b95bcac, []int{130} } func (m *ResourceRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3628,7 +3712,7 @@ var xxx_messageInfo_ResourceRef proto.InternalMessageInfo func (m *ResourceResult) Reset() { *m = ResourceResult{} } func (*ResourceResult) ProtoMessage() {} func (*ResourceResult) 
Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{128} + return fileDescriptor_030104ce3b95bcac, []int{131} } func (m *ResourceResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3656,7 +3740,7 @@ var xxx_messageInfo_ResourceResult proto.InternalMessageInfo func (m *ResourceStatus) Reset() { *m = ResourceStatus{} } func (*ResourceStatus) ProtoMessage() {} func (*ResourceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{129} + return fileDescriptor_030104ce3b95bcac, []int{132} } func (m *ResourceStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3684,7 +3768,7 @@ var xxx_messageInfo_ResourceStatus proto.InternalMessageInfo func (m *RetryStrategy) Reset() { *m = RetryStrategy{} } func (*RetryStrategy) ProtoMessage() {} func (*RetryStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{130} + return fileDescriptor_030104ce3b95bcac, []int{133} } func (m *RetryStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3712,7 +3796,7 @@ var xxx_messageInfo_RetryStrategy proto.InternalMessageInfo func (m *RevisionHistory) Reset() { *m = RevisionHistory{} } func (*RevisionHistory) ProtoMessage() {} func (*RevisionHistory) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{131} + return fileDescriptor_030104ce3b95bcac, []int{134} } func (m *RevisionHistory) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3740,7 +3824,7 @@ var xxx_messageInfo_RevisionHistory proto.InternalMessageInfo func (m *RevisionMetadata) Reset() { *m = RevisionMetadata{} } func (*RevisionMetadata) ProtoMessage() {} func (*RevisionMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{132} + return fileDescriptor_030104ce3b95bcac, []int{135} } func (m *RevisionMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3768,7 +3852,7 @@ var xxx_messageInfo_RevisionMetadata 
proto.InternalMessageInfo func (m *SCMProviderGenerator) Reset() { *m = SCMProviderGenerator{} } func (*SCMProviderGenerator) ProtoMessage() {} func (*SCMProviderGenerator) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{133} + return fileDescriptor_030104ce3b95bcac, []int{136} } func (m *SCMProviderGenerator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3796,7 +3880,7 @@ var xxx_messageInfo_SCMProviderGenerator proto.InternalMessageInfo func (m *SCMProviderGeneratorAWSCodeCommit) Reset() { *m = SCMProviderGeneratorAWSCodeCommit{} } func (*SCMProviderGeneratorAWSCodeCommit) ProtoMessage() {} func (*SCMProviderGeneratorAWSCodeCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{134} + return fileDescriptor_030104ce3b95bcac, []int{137} } func (m *SCMProviderGeneratorAWSCodeCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3824,7 +3908,7 @@ var xxx_messageInfo_SCMProviderGeneratorAWSCodeCommit proto.InternalMessageInfo func (m *SCMProviderGeneratorAzureDevOps) Reset() { *m = SCMProviderGeneratorAzureDevOps{} } func (*SCMProviderGeneratorAzureDevOps) ProtoMessage() {} func (*SCMProviderGeneratorAzureDevOps) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{135} + return fileDescriptor_030104ce3b95bcac, []int{138} } func (m *SCMProviderGeneratorAzureDevOps) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3852,7 +3936,7 @@ var xxx_messageInfo_SCMProviderGeneratorAzureDevOps proto.InternalMessageInfo func (m *SCMProviderGeneratorBitbucket) Reset() { *m = SCMProviderGeneratorBitbucket{} } func (*SCMProviderGeneratorBitbucket) ProtoMessage() {} func (*SCMProviderGeneratorBitbucket) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{136} + return fileDescriptor_030104ce3b95bcac, []int{139} } func (m *SCMProviderGeneratorBitbucket) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3880,7 +3964,7 @@ var 
xxx_messageInfo_SCMProviderGeneratorBitbucket proto.InternalMessageInfo func (m *SCMProviderGeneratorBitbucketServer) Reset() { *m = SCMProviderGeneratorBitbucketServer{} } func (*SCMProviderGeneratorBitbucketServer) ProtoMessage() {} func (*SCMProviderGeneratorBitbucketServer) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{137} + return fileDescriptor_030104ce3b95bcac, []int{140} } func (m *SCMProviderGeneratorBitbucketServer) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3908,7 +3992,7 @@ var xxx_messageInfo_SCMProviderGeneratorBitbucketServer proto.InternalMessageInf func (m *SCMProviderGeneratorFilter) Reset() { *m = SCMProviderGeneratorFilter{} } func (*SCMProviderGeneratorFilter) ProtoMessage() {} func (*SCMProviderGeneratorFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{138} + return fileDescriptor_030104ce3b95bcac, []int{141} } func (m *SCMProviderGeneratorFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3936,7 +4020,7 @@ var xxx_messageInfo_SCMProviderGeneratorFilter proto.InternalMessageInfo func (m *SCMProviderGeneratorGitea) Reset() { *m = SCMProviderGeneratorGitea{} } func (*SCMProviderGeneratorGitea) ProtoMessage() {} func (*SCMProviderGeneratorGitea) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{139} + return fileDescriptor_030104ce3b95bcac, []int{142} } func (m *SCMProviderGeneratorGitea) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3964,7 +4048,7 @@ var xxx_messageInfo_SCMProviderGeneratorGitea proto.InternalMessageInfo func (m *SCMProviderGeneratorGithub) Reset() { *m = SCMProviderGeneratorGithub{} } func (*SCMProviderGeneratorGithub) ProtoMessage() {} func (*SCMProviderGeneratorGithub) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{140} + return fileDescriptor_030104ce3b95bcac, []int{143} } func (m *SCMProviderGeneratorGithub) XXX_Unmarshal(b []byte) error { return 
m.Unmarshal(b) @@ -3992,7 +4076,7 @@ var xxx_messageInfo_SCMProviderGeneratorGithub proto.InternalMessageInfo func (m *SCMProviderGeneratorGitlab) Reset() { *m = SCMProviderGeneratorGitlab{} } func (*SCMProviderGeneratorGitlab) ProtoMessage() {} func (*SCMProviderGeneratorGitlab) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{141} + return fileDescriptor_030104ce3b95bcac, []int{144} } func (m *SCMProviderGeneratorGitlab) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4020,7 +4104,7 @@ var xxx_messageInfo_SCMProviderGeneratorGitlab proto.InternalMessageInfo func (m *SecretRef) Reset() { *m = SecretRef{} } func (*SecretRef) ProtoMessage() {} func (*SecretRef) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{142} + return fileDescriptor_030104ce3b95bcac, []int{145} } func (m *SecretRef) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4048,7 +4132,7 @@ var xxx_messageInfo_SecretRef proto.InternalMessageInfo func (m *SignatureKey) Reset() { *m = SignatureKey{} } func (*SignatureKey) ProtoMessage() {} func (*SignatureKey) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{143} + return fileDescriptor_030104ce3b95bcac, []int{146} } func (m *SignatureKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4073,10 +4157,94 @@ func (m *SignatureKey) XXX_DiscardUnknown() { var xxx_messageInfo_SignatureKey proto.InternalMessageInfo +func (m *SourceHydrator) Reset() { *m = SourceHydrator{} } +func (*SourceHydrator) ProtoMessage() {} +func (*SourceHydrator) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{147} +} +func (m *SourceHydrator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceHydrator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SourceHydrator) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceHydrator.Merge(m, src) +} +func (m *SourceHydrator) XXX_Size() int { + return m.Size() +} +func (m *SourceHydrator) XXX_DiscardUnknown() { + xxx_messageInfo_SourceHydrator.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceHydrator proto.InternalMessageInfo + +func (m *SourceHydratorStatus) Reset() { *m = SourceHydratorStatus{} } +func (*SourceHydratorStatus) ProtoMessage() {} +func (*SourceHydratorStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{148} +} +func (m *SourceHydratorStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceHydratorStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SourceHydratorStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceHydratorStatus.Merge(m, src) +} +func (m *SourceHydratorStatus) XXX_Size() int { + return m.Size() +} +func (m *SourceHydratorStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SourceHydratorStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceHydratorStatus proto.InternalMessageInfo + +func (m *SuccessfulHydrateOperation) Reset() { *m = SuccessfulHydrateOperation{} } +func (*SuccessfulHydrateOperation) ProtoMessage() {} +func (*SuccessfulHydrateOperation) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{149} +} +func (m *SuccessfulHydrateOperation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SuccessfulHydrateOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SuccessfulHydrateOperation) XXX_Merge(src proto.Message) { + xxx_messageInfo_SuccessfulHydrateOperation.Merge(m, src) +} +func (m *SuccessfulHydrateOperation) XXX_Size() int { + return 
m.Size() +} +func (m *SuccessfulHydrateOperation) XXX_DiscardUnknown() { + xxx_messageInfo_SuccessfulHydrateOperation.DiscardUnknown(m) +} + +var xxx_messageInfo_SuccessfulHydrateOperation proto.InternalMessageInfo + func (m *SyncOperation) Reset() { *m = SyncOperation{} } func (*SyncOperation) ProtoMessage() {} func (*SyncOperation) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{144} + return fileDescriptor_030104ce3b95bcac, []int{150} } func (m *SyncOperation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4104,7 +4272,7 @@ var xxx_messageInfo_SyncOperation proto.InternalMessageInfo func (m *SyncOperationResource) Reset() { *m = SyncOperationResource{} } func (*SyncOperationResource) ProtoMessage() {} func (*SyncOperationResource) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{145} + return fileDescriptor_030104ce3b95bcac, []int{151} } func (m *SyncOperationResource) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4132,7 +4300,7 @@ var xxx_messageInfo_SyncOperationResource proto.InternalMessageInfo func (m *SyncOperationResult) Reset() { *m = SyncOperationResult{} } func (*SyncOperationResult) ProtoMessage() {} func (*SyncOperationResult) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{146} + return fileDescriptor_030104ce3b95bcac, []int{152} } func (m *SyncOperationResult) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4160,7 +4328,7 @@ var xxx_messageInfo_SyncOperationResult proto.InternalMessageInfo func (m *SyncPolicy) Reset() { *m = SyncPolicy{} } func (*SyncPolicy) ProtoMessage() {} func (*SyncPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{147} + return fileDescriptor_030104ce3b95bcac, []int{153} } func (m *SyncPolicy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4188,7 +4356,7 @@ var xxx_messageInfo_SyncPolicy proto.InternalMessageInfo func (m *SyncPolicyAutomated) Reset() { *m = 
SyncPolicyAutomated{} } func (*SyncPolicyAutomated) ProtoMessage() {} func (*SyncPolicyAutomated) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{148} + return fileDescriptor_030104ce3b95bcac, []int{154} } func (m *SyncPolicyAutomated) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4213,10 +4381,38 @@ func (m *SyncPolicyAutomated) XXX_DiscardUnknown() { var xxx_messageInfo_SyncPolicyAutomated proto.InternalMessageInfo +func (m *SyncSource) Reset() { *m = SyncSource{} } +func (*SyncSource) ProtoMessage() {} +func (*SyncSource) Descriptor() ([]byte, []int) { + return fileDescriptor_030104ce3b95bcac, []int{155} +} +func (m *SyncSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SyncSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SyncSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SyncSource.Merge(m, src) +} +func (m *SyncSource) XXX_Size() int { + return m.Size() +} +func (m *SyncSource) XXX_DiscardUnknown() { + xxx_messageInfo_SyncSource.DiscardUnknown(m) +} + +var xxx_messageInfo_SyncSource proto.InternalMessageInfo + func (m *SyncStatus) Reset() { *m = SyncStatus{} } func (*SyncStatus) ProtoMessage() {} func (*SyncStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{149} + return fileDescriptor_030104ce3b95bcac, []int{156} } func (m *SyncStatus) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4244,7 +4440,7 @@ var xxx_messageInfo_SyncStatus proto.InternalMessageInfo func (m *SyncStrategy) Reset() { *m = SyncStrategy{} } func (*SyncStrategy) ProtoMessage() {} func (*SyncStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{150} + return fileDescriptor_030104ce3b95bcac, []int{157} } func (m *SyncStrategy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4272,7 
+4468,7 @@ var xxx_messageInfo_SyncStrategy proto.InternalMessageInfo func (m *SyncStrategyApply) Reset() { *m = SyncStrategyApply{} } func (*SyncStrategyApply) ProtoMessage() {} func (*SyncStrategyApply) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{151} + return fileDescriptor_030104ce3b95bcac, []int{158} } func (m *SyncStrategyApply) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4300,7 +4496,7 @@ var xxx_messageInfo_SyncStrategyApply proto.InternalMessageInfo func (m *SyncStrategyHook) Reset() { *m = SyncStrategyHook{} } func (*SyncStrategyHook) ProtoMessage() {} func (*SyncStrategyHook) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{152} + return fileDescriptor_030104ce3b95bcac, []int{159} } func (m *SyncStrategyHook) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4328,7 +4524,7 @@ var xxx_messageInfo_SyncStrategyHook proto.InternalMessageInfo func (m *SyncWindow) Reset() { *m = SyncWindow{} } func (*SyncWindow) ProtoMessage() {} func (*SyncWindow) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{153} + return fileDescriptor_030104ce3b95bcac, []int{160} } func (m *SyncWindow) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4356,7 +4552,7 @@ var xxx_messageInfo_SyncWindow proto.InternalMessageInfo func (m *TLSClientConfig) Reset() { *m = TLSClientConfig{} } func (*TLSClientConfig) ProtoMessage() {} func (*TLSClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{154} + return fileDescriptor_030104ce3b95bcac, []int{161} } func (m *TLSClientConfig) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4384,7 +4580,7 @@ var xxx_messageInfo_TLSClientConfig proto.InternalMessageInfo func (m *TagFilter) Reset() { *m = TagFilter{} } func (*TagFilter) ProtoMessage() {} func (*TagFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_030104ce3b95bcac, []int{155} + return 
fileDescriptor_030104ce3b95bcac, []int{162} } func (m *TagFilter) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4476,6 +4672,7 @@ func init() { proto.RegisterType((*ConfigManagementPlugin)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ConfigManagementPlugin") proto.RegisterType((*ConfigMapKeyRef)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ConfigMapKeyRef") proto.RegisterType((*ConnectionState)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.ConnectionState") + proto.RegisterType((*DrySource)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.DrySource") proto.RegisterType((*DuckTypeGenerator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.DuckTypeGenerator") proto.RegisterMapType((map[string]string)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.DuckTypeGenerator.ValuesEntry") proto.RegisterType((*EnvEntry)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.EnvEntry") @@ -4494,6 +4691,8 @@ func init() { proto.RegisterType((*HelmParameter)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.HelmParameter") proto.RegisterType((*HostInfo)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.HostInfo") proto.RegisterType((*HostResourceInfo)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.HostResourceInfo") + proto.RegisterType((*HydrateOperation)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.HydrateOperation") + proto.RegisterType((*HydrateTo)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.HydrateTo") proto.RegisterType((*Info)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.Info") proto.RegisterType((*InfoItem)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.InfoItem") proto.RegisterType((*JWTToken)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.JWTToken") @@ 
-4574,11 +4773,15 @@ func init() { proto.RegisterType((*SCMProviderGeneratorGitlab)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SCMProviderGeneratorGitlab") proto.RegisterType((*SecretRef)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SecretRef") proto.RegisterType((*SignatureKey)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SignatureKey") + proto.RegisterType((*SourceHydrator)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SourceHydrator") + proto.RegisterType((*SourceHydratorStatus)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SourceHydratorStatus") + proto.RegisterType((*SuccessfulHydrateOperation)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SuccessfulHydrateOperation") proto.RegisterType((*SyncOperation)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncOperation") proto.RegisterType((*SyncOperationResource)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncOperationResource") proto.RegisterType((*SyncOperationResult)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncOperationResult") proto.RegisterType((*SyncPolicy)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncPolicy") proto.RegisterType((*SyncPolicyAutomated)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncPolicyAutomated") + proto.RegisterType((*SyncSource)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncSource") proto.RegisterType((*SyncStatus)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncStatus") proto.RegisterType((*SyncStrategy)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncStrategy") proto.RegisterType((*SyncStrategyApply)(nil), "github.com.argoproj.argo_cd.v2.pkg.apis.application.v1alpha1.SyncStrategyApply") @@ -4593,719 +4796,749 @@ func init() { } var 
fileDescriptor_030104ce3b95bcac = []byte{ - // 11387 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x6f, 0x70, 0x1c, 0xc9, - 0x75, 0x18, 0xae, 0xd9, 0xc5, 0x02, 0xbb, 0x0f, 0xff, 0x88, 0x26, 0x79, 0x07, 0x52, 0x77, 0x07, - 0x7a, 0xce, 0x3e, 0x9d, 0x7e, 0xba, 0x03, 0x7c, 0xd4, 0x9d, 0x7c, 0x3f, 0x9d, 0x25, 0x19, 0x7f, - 0x48, 0x10, 0x24, 0x40, 0xe0, 0x1a, 0x20, 0x29, 0x9d, 0x7c, 0x3a, 0x0d, 0x66, 0x1b, 0x8b, 0x21, - 0x66, 0x67, 0xf6, 0x66, 0x66, 0x41, 0xe2, 0x2c, 0xc9, 0x92, 0x25, 0xd9, 0x72, 0xf4, 0xe7, 0x14, - 0x29, 0x55, 0x39, 0x27, 0x96, 0x22, 0x5b, 0x4e, 0x2a, 0xa9, 0x94, 0x2a, 0x4a, 0xf2, 0x21, 0x4e, - 0x6c, 0x97, 0x2b, 0x76, 0xca, 0xa5, 0xc4, 0x49, 0xd9, 0xa5, 0x52, 0x59, 0x4a, 0x62, 0x23, 0x12, - 0xe3, 0x94, 0x53, 0xf9, 0xe0, 0xaa, 0x38, 0xf9, 0x90, 0x62, 0xf2, 0x21, 0xd5, 0xff, 0x7b, 0x66, - 0x67, 0x81, 0x05, 0x31, 0x00, 0x29, 0xe5, 0xbe, 0xed, 0xf6, 0x7b, 0xdd, 0xaf, 0xa7, 0xff, 0xbc, - 0xf7, 0xfa, 0xf5, 0x7b, 0xaf, 0x61, 0xb1, 0xe1, 0x25, 0x9b, 0xed, 0xf5, 0x49, 0x37, 0x6c, 0x4e, - 0x39, 0x51, 0x23, 0x6c, 0x45, 0xe1, 0x4d, 0xf6, 0xe3, 0x69, 0xb7, 0x3e, 0xb5, 0x7d, 0x7e, 0xaa, - 0xb5, 0xd5, 0x98, 0x72, 0x5a, 0x5e, 0x3c, 0xe5, 0xb4, 0x5a, 0xbe, 0xe7, 0x3a, 0x89, 0x17, 0x06, - 0x53, 0xdb, 0xcf, 0x38, 0x7e, 0x6b, 0xd3, 0x79, 0x66, 0xaa, 0x41, 0x02, 0x12, 0x39, 0x09, 0xa9, - 0x4f, 0xb6, 0xa2, 0x30, 0x09, 0xd1, 0x4f, 0xeb, 0xd6, 0x26, 0x65, 0x6b, 0xec, 0xc7, 0x2b, 0x6e, - 0x7d, 0x72, 0xfb, 0xfc, 0x64, 0x6b, 0xab, 0x31, 0x49, 0x5b, 0x9b, 0x34, 0x5a, 0x9b, 0x94, 0xad, - 0x9d, 0x7d, 0xda, 0xe8, 0x4b, 0x23, 0x6c, 0x84, 0x53, 0xac, 0xd1, 0xf5, 0xf6, 0x06, 0xfb, 0xc7, - 0xfe, 0xb0, 0x5f, 0x9c, 0xd8, 0x59, 0x7b, 0xeb, 0xf9, 0x78, 0xd2, 0x0b, 0x69, 0xf7, 0xa6, 0xdc, - 0x30, 0x22, 0x53, 0xdb, 0x1d, 0x1d, 0x3a, 0x7b, 0x49, 0xe3, 0x90, 0xdb, 0x09, 0x09, 0x62, 0x2f, - 0x0c, 0xe2, 0xa7, 0x69, 0x17, 0x48, 0xb4, 0x4d, 0x22, 0xf3, 0xf3, 0x0c, 0x84, 0xbc, 0x96, 0x9e, - 0xd5, 0x2d, 0x35, 0x1d, 0x77, 0xd3, 0x0b, 
0x48, 0xb4, 0xa3, 0xab, 0x37, 0x49, 0xe2, 0xe4, 0xd5, - 0x9a, 0xea, 0x56, 0x2b, 0x6a, 0x07, 0x89, 0xd7, 0x24, 0x1d, 0x15, 0xde, 0xb5, 0x5f, 0x85, 0xd8, - 0xdd, 0x24, 0x4d, 0xa7, 0xa3, 0xde, 0x3b, 0xbb, 0xd5, 0x6b, 0x27, 0x9e, 0x3f, 0xe5, 0x05, 0x49, - 0x9c, 0x44, 0xd9, 0x4a, 0xf6, 0xaf, 0x5a, 0x30, 0x3c, 0x7d, 0x63, 0x75, 0xba, 0x9d, 0x6c, 0xce, - 0x86, 0xc1, 0x86, 0xd7, 0x40, 0xcf, 0xc1, 0xa0, 0xeb, 0xb7, 0xe3, 0x84, 0x44, 0x57, 0x9d, 0x26, - 0x19, 0xb7, 0xce, 0x59, 0x4f, 0xd6, 0x66, 0x4e, 0x7e, 0x6b, 0x77, 0xe2, 0x2d, 0x77, 0x76, 0x27, - 0x06, 0x67, 0x35, 0x08, 0x9b, 0x78, 0xe8, 0xed, 0x30, 0x10, 0x85, 0x3e, 0x99, 0xc6, 0x57, 0xc7, - 0x4b, 0xac, 0xca, 0xa8, 0xa8, 0x32, 0x80, 0x79, 0x31, 0x96, 0x70, 0x8a, 0xda, 0x8a, 0xc2, 0x0d, - 0xcf, 0x27, 0xe3, 0xe5, 0x34, 0xea, 0x0a, 0x2f, 0xc6, 0x12, 0x6e, 0xff, 0x49, 0x09, 0x60, 0xba, - 0xd5, 0x5a, 0x89, 0xc2, 0x9b, 0xc4, 0x4d, 0xd0, 0x87, 0xa1, 0x4a, 0x87, 0xb9, 0xee, 0x24, 0x0e, - 0xeb, 0xd8, 0xe0, 0xf9, 0x9f, 0x9c, 0xe4, 0x5f, 0x3d, 0x69, 0x7e, 0xb5, 0x5e, 0x64, 0x14, 0x7b, - 0x72, 0xfb, 0x99, 0xc9, 0xe5, 0x75, 0x5a, 0x7f, 0x89, 0x24, 0xce, 0x0c, 0x12, 0xc4, 0x40, 0x97, - 0x61, 0xd5, 0x2a, 0x0a, 0xa0, 0x2f, 0x6e, 0x11, 0x97, 0x7d, 0xc3, 0xe0, 0xf9, 0xc5, 0xc9, 0xc3, - 0xac, 0xe6, 0x49, 0xdd, 0xf3, 0xd5, 0x16, 0x71, 0x67, 0x86, 0x04, 0xe5, 0x3e, 0xfa, 0x0f, 0x33, - 0x3a, 0x68, 0x1b, 0xfa, 0xe3, 0xc4, 0x49, 0xda, 0x31, 0x1b, 0x8a, 0xc1, 0xf3, 0x57, 0x0b, 0xa3, - 0xc8, 0x5a, 0x9d, 0x19, 0x11, 0x34, 0xfb, 0xf9, 0x7f, 0x2c, 0xa8, 0xd9, 0x7f, 0x66, 0xc1, 0x88, - 0x46, 0x5e, 0xf4, 0xe2, 0x04, 0xfd, 0x6c, 0xc7, 0xe0, 0x4e, 0xf6, 0x36, 0xb8, 0xb4, 0x36, 0x1b, - 0xda, 0x13, 0x82, 0x58, 0x55, 0x96, 0x18, 0x03, 0xdb, 0x84, 0x8a, 0x97, 0x90, 0x66, 0x3c, 0x5e, - 0x3a, 0x57, 0x7e, 0x72, 0xf0, 0xfc, 0xa5, 0xa2, 0xbe, 0x73, 0x66, 0x58, 0x10, 0xad, 0x2c, 0xd0, - 0xe6, 0x31, 0xa7, 0x62, 0xff, 0xd5, 0xb0, 0xf9, 0x7d, 0x74, 0xc0, 0xd1, 0x33, 0x30, 0x18, 0x87, - 0xed, 0xc8, 0x25, 0x98, 0xb4, 0xc2, 0x78, 0xdc, 0x3a, 0x57, 0xa6, 0x4b, 0x8f, 
0x2e, 0xea, 0x55, - 0x5d, 0x8c, 0x4d, 0x1c, 0xf4, 0x05, 0x0b, 0x86, 0xea, 0x24, 0x4e, 0xbc, 0x80, 0xd1, 0x97, 0x9d, - 0x5f, 0x3b, 0x74, 0xe7, 0x65, 0xe1, 0x9c, 0x6e, 0x7c, 0xe6, 0x94, 0xf8, 0x90, 0x21, 0xa3, 0x30, - 0xc6, 0x29, 0xfa, 0x74, 0x73, 0xd6, 0x49, 0xec, 0x46, 0x5e, 0x8b, 0xfe, 0x17, 0xdb, 0x47, 0x6d, - 0xce, 0x39, 0x0d, 0xc2, 0x26, 0x1e, 0x0a, 0xa0, 0x42, 0x37, 0x5f, 0x3c, 0xde, 0xc7, 0xfa, 0xbf, - 0x70, 0xb8, 0xfe, 0x8b, 0x41, 0xa5, 0xfb, 0x5a, 0x8f, 0x3e, 0xfd, 0x17, 0x63, 0x4e, 0x06, 0x7d, - 0xde, 0x82, 0x71, 0xc1, 0x1c, 0x30, 0xe1, 0x03, 0x7a, 0x63, 0xd3, 0x4b, 0x88, 0xef, 0xc5, 0xc9, - 0x78, 0x85, 0xf5, 0x61, 0xaa, 0xb7, 0xb5, 0x35, 0x1f, 0x85, 0xed, 0xd6, 0x15, 0x2f, 0xa8, 0xcf, - 0x9c, 0x13, 0x94, 0xc6, 0x67, 0xbb, 0x34, 0x8c, 0xbb, 0x92, 0x44, 0x5f, 0xb6, 0xe0, 0x6c, 0xe0, - 0x34, 0x49, 0xdc, 0x72, 0xe8, 0xd4, 0x72, 0xf0, 0x8c, 0xef, 0xb8, 0x5b, 0xac, 0x47, 0xfd, 0xf7, - 0xd6, 0x23, 0x5b, 0xf4, 0xe8, 0xec, 0xd5, 0xae, 0x4d, 0xe3, 0x3d, 0xc8, 0xa2, 0xaf, 0x5b, 0x30, - 0x16, 0x46, 0xad, 0x4d, 0x27, 0x20, 0x75, 0x09, 0x8d, 0xc7, 0x07, 0xd8, 0xd6, 0xfb, 0xd0, 0xe1, - 0xa6, 0x68, 0x39, 0xdb, 0xec, 0x52, 0x18, 0x78, 0x49, 0x18, 0xad, 0x92, 0x24, 0xf1, 0x82, 0x46, - 0x3c, 0x73, 0xfa, 0xce, 0xee, 0xc4, 0x58, 0x07, 0x16, 0xee, 0xec, 0x0f, 0xfa, 0x39, 0x18, 0x8c, - 0x77, 0x02, 0xf7, 0x86, 0x17, 0xd4, 0xc3, 0x5b, 0xf1, 0x78, 0xb5, 0x88, 0xed, 0xbb, 0xaa, 0x1a, - 0x14, 0x1b, 0x50, 0x13, 0xc0, 0x26, 0xb5, 0xfc, 0x89, 0xd3, 0x4b, 0xa9, 0x56, 0xf4, 0xc4, 0xe9, - 0xc5, 0xb4, 0x07, 0x59, 0xf4, 0x4b, 0x16, 0x0c, 0xc7, 0x5e, 0x23, 0x70, 0x92, 0x76, 0x44, 0xae, - 0x90, 0x9d, 0x78, 0x1c, 0x58, 0x47, 0x2e, 0x1f, 0x72, 0x54, 0x8c, 0x26, 0x67, 0x4e, 0x8b, 0x3e, - 0x0e, 0x9b, 0xa5, 0x31, 0x4e, 0xd3, 0xcd, 0xdb, 0x68, 0x7a, 0x59, 0x0f, 0x16, 0xbb, 0xd1, 0xf4, - 0xa2, 0xee, 0x4a, 0x12, 0xfd, 0x0c, 0x9c, 0xe0, 0x45, 0x6a, 0x64, 0xe3, 0xf1, 0x21, 0xc6, 0x68, - 0x4f, 0xdd, 0xd9, 0x9d, 0x38, 0xb1, 0x9a, 0x81, 0xe1, 0x0e, 0x6c, 0xf4, 0x2a, 0x4c, 0xb4, 0x48, - 0xd4, 0xf4, 0x92, 
0xe5, 0xc0, 0xdf, 0x91, 0xec, 0xdb, 0x0d, 0x5b, 0xa4, 0x2e, 0xba, 0x13, 0x8f, - 0x0f, 0x9f, 0xb3, 0x9e, 0xac, 0xce, 0xbc, 0x4d, 0x74, 0x73, 0x62, 0x65, 0x6f, 0x74, 0xbc, 0x5f, - 0x7b, 0xe8, 0x0f, 0x2c, 0x38, 0x6b, 0x70, 0xd9, 0x55, 0x12, 0x6d, 0x7b, 0x2e, 0x99, 0x76, 0xdd, - 0xb0, 0x1d, 0x24, 0xf1, 0xf8, 0x08, 0x1b, 0xc6, 0xf5, 0xa3, 0xe0, 0xf9, 0x69, 0x52, 0x7a, 0x5d, - 0x76, 0x45, 0x89, 0xf1, 0x1e, 0x3d, 0xb5, 0xff, 0x75, 0x09, 0x4e, 0x64, 0x35, 0x00, 0xf4, 0xf7, - 0x2c, 0x18, 0xbd, 0x79, 0x2b, 0x59, 0x0b, 0xb7, 0x48, 0x10, 0xcf, 0xec, 0x50, 0x3e, 0xcd, 0x64, - 0xdf, 0xe0, 0x79, 0xb7, 0x58, 0x5d, 0x63, 0xf2, 0x72, 0x9a, 0xca, 0x85, 0x20, 0x89, 0x76, 0x66, - 0x1e, 0x16, 0xdf, 0x34, 0x7a, 0xf9, 0xc6, 0x9a, 0x09, 0xc5, 0xd9, 0x4e, 0x9d, 0xfd, 0xac, 0x05, - 0xa7, 0xf2, 0x9a, 0x40, 0x27, 0xa0, 0xbc, 0x45, 0x76, 0xb8, 0x26, 0x8a, 0xe9, 0x4f, 0xf4, 0x32, - 0x54, 0xb6, 0x1d, 0xbf, 0x4d, 0x84, 0x9a, 0x36, 0x7f, 0xb8, 0x0f, 0x51, 0x3d, 0xc3, 0xbc, 0xd5, - 0x77, 0x97, 0x9e, 0xb7, 0xec, 0x3f, 0x2a, 0xc3, 0xa0, 0x31, 0x69, 0xc7, 0xa0, 0x7a, 0x86, 0x29, - 0xd5, 0x73, 0xa9, 0xb0, 0xf5, 0xd6, 0x55, 0xf7, 0xbc, 0x95, 0xd1, 0x3d, 0x97, 0x8b, 0x23, 0xb9, - 0xa7, 0xf2, 0x89, 0x12, 0xa8, 0x85, 0x2d, 0x7a, 0x0c, 0xa1, 0x3a, 0x4c, 0x5f, 0x11, 0x53, 0xb8, - 0x2c, 0x9b, 0x9b, 0x19, 0xbe, 0xb3, 0x3b, 0x51, 0x53, 0x7f, 0xb1, 0x26, 0x64, 0x7f, 0xd7, 0x82, - 0x53, 0x46, 0x1f, 0x67, 0xc3, 0xa0, 0xee, 0xb1, 0xa9, 0x3d, 0x07, 0x7d, 0xc9, 0x4e, 0x4b, 0x1e, - 0x75, 0xd4, 0x48, 0xad, 0xed, 0xb4, 0x08, 0x66, 0x10, 0x7a, 0x62, 0x69, 0x92, 0x38, 0x76, 0x1a, - 0x24, 0x7b, 0xb8, 0x59, 0xe2, 0xc5, 0x58, 0xc2, 0x51, 0x04, 0xc8, 0x77, 0xe2, 0x64, 0x2d, 0x72, - 0x82, 0x98, 0x35, 0xbf, 0xe6, 0x35, 0x89, 0x18, 0xe0, 0xff, 0xaf, 0xb7, 0x15, 0x43, 0x6b, 0xcc, - 0x3c, 0x74, 0x67, 0x77, 0x02, 0x2d, 0x76, 0xb4, 0x84, 0x73, 0x5a, 0xb7, 0xbf, 0x6c, 0xc1, 0x43, - 0xf9, 0x0c, 0x06, 0x3d, 0x01, 0xfd, 0xfc, 0x9c, 0x2b, 0xbe, 0x4e, 0x4f, 0x09, 0x2b, 0xc5, 0x02, - 0x8a, 0xa6, 0xa0, 0xa6, 0x04, 0x9e, 0xf8, 0xc6, 0x31, 
0x81, 0x5a, 0xd3, 0x52, 0x52, 0xe3, 0xd0, - 0x41, 0xa3, 0x7f, 0x84, 0x0a, 0xaa, 0x06, 0x8d, 0x1d, 0x0c, 0x19, 0xc4, 0xfe, 0x8e, 0x05, 0x3f, - 0xde, 0x0b, 0xdb, 0x3b, 0xba, 0x3e, 0xae, 0xc2, 0xe9, 0x3a, 0xd9, 0x70, 0xda, 0x7e, 0x92, 0xa6, - 0x28, 0x3a, 0xfd, 0xa8, 0xa8, 0x7c, 0x7a, 0x2e, 0x0f, 0x09, 0xe7, 0xd7, 0xb5, 0xff, 0x93, 0x05, - 0xa3, 0xc6, 0x67, 0x1d, 0xc3, 0xd1, 0x29, 0x48, 0x1f, 0x9d, 0x16, 0x0a, 0xdb, 0xa6, 0x5d, 0xce, - 0x4e, 0x9f, 0xb7, 0xe0, 0xac, 0x81, 0xb5, 0xe4, 0x24, 0xee, 0xe6, 0x85, 0xdb, 0xad, 0x88, 0xc4, - 0x31, 0x5d, 0x52, 0x8f, 0x1a, 0xec, 0x78, 0x66, 0x50, 0xb4, 0x50, 0xbe, 0x42, 0x76, 0x38, 0x6f, - 0x7e, 0x0a, 0xaa, 0x7c, 0xcf, 0x85, 0x91, 0x98, 0x24, 0xf5, 0x6d, 0xcb, 0xa2, 0x1c, 0x2b, 0x0c, - 0x64, 0x43, 0x3f, 0xe3, 0xb9, 0x94, 0x07, 0x51, 0x35, 0x01, 0xe8, 0xbc, 0x5f, 0x67, 0x25, 0x58, - 0x40, 0xec, 0x38, 0xd5, 0x9d, 0x95, 0x88, 0xb0, 0xf5, 0x50, 0xbf, 0xe8, 0x11, 0xbf, 0x1e, 0xd3, - 0x63, 0x9d, 0x13, 0x04, 0x61, 0x22, 0x4e, 0x68, 0xc6, 0xb1, 0x6e, 0x5a, 0x17, 0x63, 0x13, 0x87, - 0x12, 0xf5, 0x9d, 0x75, 0xe2, 0xf3, 0x11, 0x15, 0x44, 0x17, 0x59, 0x09, 0x16, 0x10, 0xfb, 0x4e, - 0x89, 0x1d, 0x20, 0x15, 0x47, 0x23, 0xc7, 0x61, 0x7d, 0x88, 0x52, 0x22, 0x60, 0xa5, 0x38, 0x7e, - 0x4c, 0xba, 0x5b, 0x20, 0x5e, 0xcb, 0x48, 0x01, 0x5c, 0x28, 0xd5, 0xbd, 0xad, 0x10, 0x1f, 0x2f, - 0xc3, 0x44, 0xba, 0x42, 0x87, 0x10, 0xa1, 0x47, 0x5e, 0x83, 0x50, 0xd6, 0x1e, 0x65, 0xe0, 0x63, - 0x13, 0xaf, 0x0b, 0x1f, 0x2e, 0x1d, 0x25, 0x1f, 0x36, 0xc5, 0x44, 0x79, 0x1f, 0x31, 0xf1, 0x84, - 0x1a, 0xf5, 0xbe, 0x0c, 0xcf, 0x4b, 0x8b, 0xca, 0x73, 0xd0, 0x17, 0x27, 0xa4, 0x35, 0x5e, 0x49, - 0xb3, 0xd9, 0xd5, 0x84, 0xb4, 0x30, 0x83, 0xa0, 0xf7, 0xc0, 0x68, 0xe2, 0x44, 0x0d, 0x92, 0x44, - 0x64, 0xdb, 0x63, 0xb6, 0x4b, 0x76, 0x9e, 0xad, 0xcd, 0x9c, 0xa4, 0x5a, 0xd7, 0x1a, 0x03, 0x61, - 0x09, 0xc2, 0x59, 0x5c, 0xfb, 0xbf, 0x95, 0xe0, 0xe1, 0xf4, 0x14, 0x68, 0xc1, 0xf8, 0xbe, 0x94, - 0x60, 0x7c, 0x87, 0x29, 0x18, 0xef, 0xee, 0x4e, 0xbc, 0xb5, 0x4b, 0xb5, 0x1f, 0x1a, 0xb9, 
0x89, - 0xe6, 0x33, 0x93, 0x30, 0x95, 0x9e, 0x84, 0xbb, 0xbb, 0x13, 0x8f, 0x76, 0xf9, 0xc6, 0xcc, 0x2c, - 0x3d, 0x01, 0xfd, 0x11, 0x71, 0xe2, 0x30, 0x10, 0xf3, 0xa4, 0x66, 0x13, 0xb3, 0x52, 0x2c, 0xa0, - 0xf6, 0xb7, 0x6b, 0xd9, 0xc1, 0x9e, 0xe7, 0xf6, 0xd8, 0x30, 0x42, 0x1e, 0xf4, 0xb1, 0x53, 0x1b, - 0xe7, 0x2c, 0x57, 0x0e, 0xb7, 0x0b, 0xa9, 0x14, 0x51, 0x4d, 0xcf, 0x54, 0xe9, 0xac, 0xd1, 0x22, - 0xcc, 0x48, 0xa0, 0xdb, 0x50, 0x75, 0xe5, 0x61, 0xaa, 0x54, 0x84, 0xd9, 0x51, 0x1c, 0xa5, 0x34, - 0xc5, 0x21, 0xca, 0xee, 0xd5, 0x09, 0x4c, 0x51, 0x43, 0x04, 0xca, 0x0d, 0x2f, 0x11, 0xd3, 0x7a, - 0xc8, 0xe3, 0xf2, 0xbc, 0x67, 0x7c, 0xe2, 0x00, 0x95, 0x41, 0xf3, 0x5e, 0x82, 0x69, 0xfb, 0xe8, - 0xd3, 0x16, 0x0c, 0xc6, 0x6e, 0x73, 0x25, 0x0a, 0xb7, 0xbd, 0x3a, 0x89, 0x84, 0x8e, 0x79, 0x48, - 0xce, 0xb6, 0x3a, 0xbb, 0x24, 0x1b, 0xd4, 0x74, 0xb9, 0xf9, 0x42, 0x43, 0xb0, 0x49, 0x97, 0x9e, - 0xbd, 0x1e, 0x16, 0xdf, 0x3e, 0x47, 0x5c, 0xb6, 0xe3, 0xe4, 0x99, 0x99, 0xad, 0x94, 0x43, 0xeb, - 0xdc, 0x73, 0x6d, 0x77, 0x8b, 0xee, 0x37, 0xdd, 0xa1, 0xb7, 0xde, 0xd9, 0x9d, 0x78, 0x78, 0x36, - 0x9f, 0x26, 0xee, 0xd6, 0x19, 0x36, 0x60, 0xad, 0xb6, 0xef, 0x63, 0xf2, 0x6a, 0x9b, 0x30, 0x8b, - 0x58, 0x01, 0x03, 0xb6, 0xa2, 0x1b, 0xcc, 0x0c, 0x98, 0x01, 0xc1, 0x26, 0x5d, 0xf4, 0x2a, 0xf4, - 0x37, 0x9d, 0x24, 0xf2, 0x6e, 0x0b, 0x33, 0xd8, 0x21, 0x4f, 0x41, 0x4b, 0xac, 0x2d, 0x4d, 0x9c, - 0x09, 0x7a, 0x5e, 0x88, 0x05, 0x21, 0xd4, 0x84, 0x4a, 0x93, 0x44, 0x0d, 0x32, 0x5e, 0x2d, 0xc2, - 0xe4, 0xbf, 0x44, 0x9b, 0xd2, 0x04, 0x6b, 0x54, 0xb9, 0x62, 0x65, 0x98, 0x53, 0x41, 0x2f, 0x43, - 0x35, 0x26, 0x3e, 0x71, 0xa9, 0x7a, 0x54, 0x63, 0x14, 0xdf, 0xd9, 0xa3, 0xaa, 0x48, 0xf5, 0x92, - 0x55, 0x51, 0x95, 0x6f, 0x30, 0xf9, 0x0f, 0xab, 0x26, 0xe9, 0x00, 0xb6, 0xfc, 0x76, 0xc3, 0x0b, - 0xc6, 0xa1, 0x88, 0x01, 0x5c, 0x61, 0x6d, 0x65, 0x06, 0x90, 0x17, 0x62, 0x41, 0xc8, 0xfe, 0x2f, - 0x16, 0xa0, 0x34, 0x53, 0x3b, 0x06, 0x9d, 0xf8, 0xd5, 0xb4, 0x4e, 0xbc, 0x58, 0xa4, 0xd2, 0xd2, - 0x45, 0x2d, 0xfe, 0xad, 0x1a, 
0x64, 0xc4, 0xc1, 0x55, 0x12, 0x27, 0xa4, 0xfe, 0x26, 0x0b, 0x7f, - 0x93, 0x85, 0xbf, 0xc9, 0xc2, 0x15, 0x0b, 0x5f, 0xcf, 0xb0, 0xf0, 0xf7, 0x1a, 0xbb, 0x5e, 0xdf, - 0xaf, 0xbf, 0xa2, 0x2e, 0xe0, 0xcd, 0x1e, 0x18, 0x08, 0x94, 0x13, 0x5c, 0x5e, 0x5d, 0xbe, 0x9a, - 0xcb, 0xb3, 0x5f, 0x49, 0xf3, 0xec, 0xc3, 0x92, 0xf8, 0x7f, 0x81, 0x4b, 0xff, 0x81, 0x05, 0x6f, - 0x4b, 0x73, 0x2f, 0xb9, 0x72, 0x16, 0x1a, 0x41, 0x18, 0x91, 0x39, 0x6f, 0x63, 0x83, 0x44, 0x24, - 0x70, 0x49, 0xac, 0x6c, 0x3b, 0x56, 0x37, 0xdb, 0x0e, 0x7a, 0x16, 0x86, 0x6e, 0xc6, 0x61, 0xb0, - 0x12, 0x7a, 0x81, 0x60, 0x41, 0xf4, 0xc4, 0x71, 0xe2, 0xce, 0xee, 0xc4, 0x10, 0x1d, 0x51, 0x59, - 0x8e, 0x53, 0x58, 0x68, 0x16, 0xc6, 0x6e, 0xbe, 0xba, 0xe2, 0x24, 0x86, 0x35, 0x41, 0x9e, 0xfb, - 0xd9, 0x7d, 0xd4, 0xe5, 0x17, 0x33, 0x40, 0xdc, 0x89, 0x6f, 0xff, 0xed, 0x12, 0x9c, 0xc9, 0x7c, - 0x48, 0xe8, 0xfb, 0x61, 0x3b, 0xa1, 0x67, 0x22, 0xf4, 0x55, 0x0b, 0x4e, 0x34, 0xd3, 0x06, 0x8b, - 0x58, 0x98, 0xbb, 0xdf, 0x5f, 0x98, 0x8c, 0xc8, 0x58, 0x44, 0x66, 0xc6, 0xc5, 0x08, 0x9d, 0xc8, - 0x00, 0x62, 0xdc, 0xd1, 0x17, 0xf4, 0x32, 0xd4, 0x9a, 0xce, 0xed, 0x6b, 0xad, 0xba, 0x93, 0xc8, - 0xe3, 0x68, 0x77, 0x2b, 0x42, 0x3b, 0xf1, 0xfc, 0x49, 0xee, 0xb9, 0x31, 0xb9, 0x10, 0x24, 0xcb, - 0xd1, 0x6a, 0x12, 0x79, 0x41, 0x83, 0x1b, 0x39, 0x97, 0x64, 0x33, 0x58, 0xb7, 0x68, 0x7f, 0xc5, - 0xca, 0x0a, 0x29, 0x35, 0x3a, 0x91, 0x93, 0x90, 0xc6, 0x0e, 0xfa, 0x08, 0x54, 0xe8, 0xb9, 0x51, - 0x8e, 0xca, 0x8d, 0x22, 0x25, 0xa7, 0x31, 0x13, 0x5a, 0x88, 0xd2, 0x7f, 0x31, 0xe6, 0x44, 0xed, - 0xaf, 0xd6, 0xb2, 0xca, 0x02, 0xbb, 0x9b, 0x3f, 0x0f, 0xd0, 0x08, 0xd7, 0x48, 0xb3, 0xe5, 0xd3, - 0x61, 0xb1, 0xd8, 0x05, 0x8f, 0x32, 0x95, 0xcc, 0x2b, 0x08, 0x36, 0xb0, 0xd0, 0x2f, 0x5b, 0x00, - 0x0d, 0xb9, 0xe6, 0xa5, 0x22, 0x70, 0xad, 0xc8, 0xcf, 0xd1, 0x3b, 0x4a, 0xf7, 0x45, 0x11, 0xc4, - 0x06, 0x71, 0xf4, 0x0b, 0x16, 0x54, 0x13, 0xd9, 0x7d, 0x2e, 0x1a, 0xd7, 0x8a, 0xec, 0x89, 0xfc, - 0x68, 0xad, 0x13, 0xa9, 0x21, 0x51, 0x74, 0xd1, 0x2f, 0x5a, 0x00, 
0xf1, 0x4e, 0xe0, 0xae, 0x84, - 0xbe, 0xe7, 0xee, 0x08, 0x89, 0x79, 0xbd, 0x50, 0x73, 0x8e, 0x6a, 0x7d, 0x66, 0x84, 0x8e, 0x86, - 0xfe, 0x8f, 0x0d, 0xca, 0xe8, 0x63, 0x50, 0x8d, 0xc5, 0x72, 0x13, 0x32, 0x72, 0xad, 0x58, 0xa3, - 0x12, 0x6f, 0x5b, 0xb0, 0x57, 0xf1, 0x0f, 0x2b, 0x9a, 0xe8, 0x6f, 0x5a, 0x30, 0xda, 0x4a, 0x9b, - 0x09, 0x85, 0x38, 0x2c, 0x8e, 0x07, 0x64, 0xcc, 0x90, 0xdc, 0xda, 0x92, 0x29, 0xc4, 0xd9, 0x5e, - 0x50, 0x0e, 0xa8, 0x57, 0xf0, 0x72, 0x8b, 0x9b, 0x2c, 0x07, 0x34, 0x07, 0x9c, 0xcf, 0x02, 0x71, - 0x27, 0x3e, 0x5a, 0x81, 0x53, 0xb4, 0x77, 0x3b, 0x5c, 0xfd, 0x94, 0xe2, 0x25, 0x66, 0xc2, 0xb0, - 0x3a, 0xf3, 0x88, 0x58, 0x21, 0xec, 0xae, 0x23, 0x8b, 0x83, 0x73, 0x6b, 0xa2, 0x3f, 0xb2, 0xe0, - 0x11, 0x8f, 0x89, 0x01, 0xd3, 0x60, 0xaf, 0x25, 0x82, 0xb8, 0x68, 0x27, 0x85, 0xf2, 0x8a, 0x6e, - 0xe2, 0x67, 0xe6, 0xc7, 0xc5, 0x17, 0x3c, 0xb2, 0xb0, 0x47, 0x97, 0xf0, 0x9e, 0x1d, 0x46, 0x3f, - 0x05, 0xc3, 0x72, 0x5f, 0xac, 0x50, 0x16, 0xcc, 0x04, 0x6d, 0x6d, 0x66, 0xec, 0xce, 0xee, 0xc4, - 0xf0, 0x9a, 0x09, 0xc0, 0x69, 0x3c, 0xfb, 0xdf, 0x94, 0x53, 0xb7, 0x44, 0xca, 0x86, 0xc9, 0xd8, - 0x8d, 0x2b, 0xed, 0x3f, 0x92, 0x7b, 0x16, 0xca, 0x6e, 0x94, 0x75, 0x49, 0xb3, 0x1b, 0x55, 0x14, - 0x63, 0x83, 0x38, 0x55, 0x4a, 0xc7, 0x9c, 0xac, 0xa5, 0x54, 0x70, 0xc0, 0x97, 0x8b, 0xec, 0x52, - 0xe7, 0x9d, 0xde, 0x19, 0xd1, 0xb5, 0xb1, 0x0e, 0x10, 0xee, 0xec, 0x12, 0xfa, 0x28, 0xd4, 0x22, - 0xe5, 0xd9, 0x52, 0x2e, 0xe2, 0xa8, 0x26, 0x97, 0x8d, 0xe8, 0x8e, 0xba, 0x00, 0xd2, 0x3e, 0x2c, - 0x9a, 0xa2, 0xfd, 0x87, 0xe9, 0x8b, 0x31, 0x83, 0x77, 0xf4, 0x70, 0xe9, 0xf7, 0x05, 0x0b, 0x06, - 0xa3, 0xd0, 0xf7, 0xbd, 0xa0, 0x41, 0xf9, 0x9c, 0x10, 0xd6, 0x1f, 0x3c, 0x12, 0x79, 0x29, 0x18, - 0x1a, 0xd3, 0xac, 0xb1, 0xa6, 0x89, 0xcd, 0x0e, 0xd8, 0x7f, 0x66, 0xc1, 0x78, 0x37, 0x7e, 0x8c, - 0x08, 0xbc, 0x55, 0x32, 0x1b, 0x35, 0x14, 0xcb, 0xc1, 0x1c, 0xf1, 0x89, 0x32, 0x9b, 0x57, 0x67, - 0x1e, 0x17, 0x9f, 0xf9, 0xd6, 0x95, 0xee, 0xa8, 0x78, 0xaf, 0x76, 0xd0, 0x4b, 0x70, 0xc2, 0xf8, - 0xae, 
0x58, 0x0d, 0x4c, 0x6d, 0x66, 0x92, 0x2a, 0x40, 0xd3, 0x19, 0xd8, 0xdd, 0xdd, 0x89, 0x87, - 0xb2, 0x65, 0x42, 0x60, 0x74, 0xb4, 0x63, 0xff, 0x46, 0x29, 0x3b, 0x5b, 0x4a, 0xd6, 0xbf, 0x61, - 0x75, 0x58, 0x13, 0xde, 0x7f, 0x14, 0xf2, 0x95, 0xd9, 0x1d, 0x94, 0x1b, 0x46, 0x77, 0x9c, 0xfb, - 0x78, 0x6d, 0x6f, 0xff, 0xdb, 0x3e, 0xd8, 0xa3, 0x67, 0x3d, 0x28, 0xef, 0x07, 0xbe, 0x47, 0xfd, - 0x9c, 0xa5, 0x2e, 0xcc, 0xf8, 0x1e, 0xae, 0x1f, 0xd5, 0xd8, 0xf3, 0xf3, 0x53, 0xcc, 0x5d, 0x47, - 0x94, 0x15, 0x3d, 0x7d, 0x35, 0x87, 0xbe, 0x66, 0xa5, 0xaf, 0xfc, 0xb8, 0x53, 0xa3, 0x77, 0x64, - 0x7d, 0x32, 0xee, 0x11, 0x79, 0xc7, 0xf4, 0xed, 0x53, 0xb7, 0x1b, 0xc6, 0x49, 0x80, 0x0d, 0x2f, - 0x70, 0x7c, 0xef, 0x35, 0x7a, 0x3a, 0xaa, 0x30, 0x01, 0xcf, 0x34, 0xa6, 0x8b, 0xaa, 0x14, 0x1b, - 0x18, 0x67, 0xff, 0x7f, 0x18, 0x34, 0xbe, 0x3c, 0xc7, 0xe3, 0xe5, 0x94, 0xe9, 0xf1, 0x52, 0x33, - 0x1c, 0x55, 0xce, 0xbe, 0x17, 0x4e, 0x64, 0x3b, 0x78, 0x90, 0xfa, 0xf6, 0xff, 0x1a, 0xc8, 0xde, - 0xc1, 0xad, 0x91, 0xa8, 0x49, 0xbb, 0xf6, 0xa6, 0x61, 0xeb, 0x4d, 0xc3, 0xd6, 0x9b, 0x86, 0x2d, - 0xf3, 0x6e, 0x42, 0x18, 0x6d, 0x06, 0x8e, 0xc9, 0x68, 0x93, 0x32, 0x43, 0x55, 0x0b, 0x37, 0x43, - 0xd9, 0x9f, 0xee, 0xb0, 0xdc, 0xaf, 0x45, 0x84, 0xa0, 0x10, 0x2a, 0x41, 0x58, 0x27, 0x52, 0xc7, - 0xbd, 0x5c, 0x8c, 0xc2, 0x76, 0x35, 0xac, 0x1b, 0xee, 0xe2, 0xf4, 0x5f, 0x8c, 0x39, 0x1d, 0xfb, - 0x4e, 0x05, 0x52, 0xea, 0x24, 0x9f, 0xf7, 0xb7, 0xc3, 0x40, 0x44, 0x5a, 0xe1, 0x35, 0xbc, 0x28, - 0x64, 0x99, 0x8e, 0x28, 0xe1, 0xc5, 0x58, 0xc2, 0xa9, 0xcc, 0x6b, 0x39, 0xc9, 0xa6, 0x10, 0x66, - 0x4a, 0xe6, 0xad, 0x38, 0xc9, 0x26, 0x66, 0x10, 0xf4, 0x5e, 0x18, 0x49, 0x52, 0x57, 0xe1, 0xe2, - 0xca, 0xf7, 0x21, 0x81, 0x3b, 0x92, 0xbe, 0x28, 0xc7, 0x19, 0x6c, 0xf4, 0x2a, 0xf4, 0x6d, 0x12, - 0xbf, 0x29, 0xa6, 0x7e, 0xb5, 0x38, 0x59, 0xc3, 0xbe, 0xf5, 0x12, 0xf1, 0x9b, 0x9c, 0x13, 0xd2, - 0x5f, 0x98, 0x91, 0xa2, 0xeb, 0xbe, 0xb6, 0xd5, 0x8e, 0x93, 0xb0, 0xe9, 0xbd, 0x26, 0x2d, 0x9d, - 0xef, 0x2f, 0x98, 0xf0, 0x15, 0xd9, 0x3e, 
0x37, 0x29, 0xa9, 0xbf, 0x58, 0x53, 0x66, 0xfd, 0xa8, - 0x7b, 0x11, 0x5b, 0x32, 0x3b, 0xc2, 0x60, 0x59, 0x74, 0x3f, 0xe6, 0x64, 0xfb, 0xbc, 0x1f, 0xea, - 0x2f, 0xd6, 0x94, 0xd1, 0x8e, 0xda, 0x7f, 0x83, 0xac, 0x0f, 0xd7, 0x0a, 0xee, 0x03, 0xdf, 0x7b, - 0xb9, 0xfb, 0xf0, 0x71, 0xa8, 0xb8, 0x9b, 0x4e, 0x94, 0x8c, 0x0f, 0xb1, 0x45, 0xa3, 0x56, 0xf1, - 0x2c, 0x2d, 0xc4, 0x1c, 0x86, 0x1e, 0x85, 0x72, 0x44, 0x36, 0x98, 0x77, 0xb2, 0xe1, 0x17, 0x85, - 0xc9, 0x06, 0xa6, 0xe5, 0xf6, 0xaf, 0x95, 0xd2, 0x6a, 0x5b, 0xfa, 0xbb, 0xf9, 0x6a, 0x77, 0xdb, - 0x51, 0x2c, 0xcd, 0x5f, 0xc6, 0x6a, 0x67, 0xc5, 0x58, 0xc2, 0xd1, 0x27, 0x2c, 0x18, 0xb8, 0x19, - 0x87, 0x41, 0x40, 0x12, 0x21, 0x22, 0xaf, 0x17, 0x3c, 0x14, 0x97, 0x79, 0xeb, 0xba, 0x0f, 0xa2, - 0x00, 0x4b, 0xba, 0xb4, 0xbb, 0xe4, 0xb6, 0xeb, 0xb7, 0xeb, 0x1d, 0xae, 0x2e, 0x17, 0x78, 0x31, - 0x96, 0x70, 0x8a, 0xea, 0x05, 0x1c, 0xb5, 0x2f, 0x8d, 0xba, 0x10, 0x08, 0x54, 0x01, 0xb7, 0xbf, - 0x39, 0x00, 0xa7, 0x73, 0x37, 0x07, 0x55, 0xa8, 0x98, 0xca, 0x72, 0xd1, 0xf3, 0x89, 0x74, 0xf2, - 0x62, 0x0a, 0xd5, 0x75, 0x55, 0x8a, 0x0d, 0x0c, 0xf4, 0xf3, 0x00, 0x2d, 0x27, 0x72, 0x9a, 0x44, - 0x99, 0xa7, 0x0f, 0xad, 0xb7, 0xd0, 0x7e, 0xac, 0xc8, 0x36, 0xf5, 0x11, 0x5d, 0x15, 0xc5, 0xd8, - 0x20, 0x89, 0x9e, 0x83, 0xc1, 0x88, 0xf8, 0xc4, 0x89, 0x99, 0x73, 0x7b, 0x36, 0x52, 0x07, 0x6b, - 0x10, 0x36, 0xf1, 0xd0, 0x13, 0xca, 0x1f, 0x2e, 0xe3, 0x17, 0x94, 0xf6, 0x89, 0x43, 0xaf, 0x5b, - 0x30, 0xb2, 0xe1, 0xf9, 0x44, 0x53, 0x17, 0x71, 0x35, 0xcb, 0x87, 0xff, 0xc8, 0x8b, 0x66, 0xbb, - 0x9a, 0x43, 0xa6, 0x8a, 0x63, 0x9c, 0x21, 0x4f, 0xa7, 0x79, 0x9b, 0x44, 0x8c, 0xb5, 0xf6, 0xa7, - 0xa7, 0xf9, 0x3a, 0x2f, 0xc6, 0x12, 0x8e, 0xa6, 0x61, 0xb4, 0xe5, 0xc4, 0xf1, 0x6c, 0x44, 0xea, - 0x24, 0x48, 0x3c, 0xc7, 0xe7, 0x51, 0x2f, 0x55, 0xed, 0x2c, 0xbe, 0x92, 0x06, 0xe3, 0x2c, 0x3e, - 0xfa, 0x00, 0x3c, 0xcc, 0xed, 0x3f, 0x4b, 0x5e, 0x1c, 0x7b, 0x41, 0x43, 0x2f, 0x03, 0x61, 0x06, - 0x9b, 0x10, 0x4d, 0x3d, 0xbc, 0x90, 0x8f, 0x86, 0xbb, 0xd5, 0x47, 0x4f, 0x41, 
0x35, 0xde, 0xf2, - 0x5a, 0xb3, 0x51, 0x3d, 0x66, 0x77, 0x3f, 0x55, 0x6d, 0x74, 0x5d, 0x15, 0xe5, 0x58, 0x61, 0x20, - 0x17, 0x86, 0xf8, 0x94, 0x70, 0x87, 0x3e, 0xc1, 0x1f, 0x9f, 0xee, 0x2a, 0xa6, 0x45, 0x10, 0xe7, - 0x24, 0x76, 0x6e, 0x5d, 0x90, 0x37, 0x51, 0xfc, 0xe2, 0xe4, 0xba, 0xd1, 0x0c, 0x4e, 0x35, 0x9a, - 0x3e, 0xb1, 0x0d, 0xf6, 0x70, 0x62, 0x7b, 0x0e, 0x06, 0xb7, 0xda, 0xeb, 0x44, 0x8c, 0xbc, 0x60, - 0x5b, 0x6a, 0xf5, 0x5d, 0xd1, 0x20, 0x6c, 0xe2, 0x31, 0x5f, 0xca, 0x96, 0x27, 0xfe, 0xc5, 0xe3, - 0xc3, 0x86, 0x2f, 0xe5, 0xca, 0x82, 0x2c, 0xc6, 0x26, 0x8e, 0xfd, 0x2b, 0xa5, 0xb4, 0x51, 0xc2, - 0xe4, 0x1f, 0x28, 0xa6, 0x5c, 0x22, 0xb9, 0xee, 0x44, 0x52, 0x97, 0x38, 0x64, 0xdc, 0x90, 0x68, - 0xf7, 0xba, 0x13, 0x99, 0xfc, 0x86, 0x11, 0xc0, 0x92, 0x12, 0xba, 0x09, 0x7d, 0x89, 0xef, 0x14, - 0x14, 0x68, 0x68, 0x50, 0xd4, 0x36, 0xa2, 0xc5, 0xe9, 0x18, 0x33, 0x1a, 0xe8, 0x11, 0x7a, 0x30, - 0x5a, 0x97, 0x97, 0x58, 0xe2, 0x2c, 0xb3, 0x1e, 0x63, 0x56, 0x6a, 0xff, 0xf9, 0x60, 0x0e, 0xcb, - 0x57, 0x32, 0x16, 0x9d, 0x07, 0xa0, 0x33, 0xb6, 0x12, 0x91, 0x0d, 0xef, 0xb6, 0xd0, 0x71, 0x14, - 0x5b, 0xb9, 0xaa, 0x20, 0xd8, 0xc0, 0x92, 0x75, 0x56, 0xdb, 0x1b, 0xb4, 0x4e, 0xa9, 0xb3, 0x0e, - 0x87, 0x60, 0x03, 0x0b, 0x3d, 0x0b, 0xfd, 0x5e, 0xd3, 0x69, 0x28, 0x1f, 0xdb, 0x47, 0x28, 0x3f, - 0x59, 0x60, 0x25, 0x77, 0x77, 0x27, 0x46, 0x54, 0x87, 0x58, 0x11, 0x16, 0xb8, 0xe8, 0x37, 0x2c, - 0x18, 0x72, 0xc3, 0x66, 0x33, 0x0c, 0xf8, 0xc9, 0x54, 0x1c, 0xb3, 0x6f, 0x1e, 0x95, 0x06, 0x32, - 0x39, 0x6b, 0x10, 0xe3, 0xe7, 0x6c, 0x15, 0x11, 0x69, 0x82, 0x70, 0xaa, 0x57, 0x26, 0xdb, 0xa9, - 0xec, 0xc3, 0x76, 0x7e, 0xd3, 0x82, 0x31, 0x5e, 0xd7, 0x38, 0x30, 0x8b, 0xe0, 0xbf, 0xf0, 0x88, - 0x3f, 0xab, 0xc3, 0x86, 0xa0, 0xec, 0xa8, 0x1d, 0x70, 0xdc, 0xd9, 0x49, 0x34, 0x0f, 0x63, 0x1b, - 0x61, 0xe4, 0x12, 0x73, 0x20, 0x04, 0xcf, 0x54, 0x0d, 0x5d, 0xcc, 0x22, 0xe0, 0xce, 0x3a, 0xe8, - 0x3a, 0x3c, 0x64, 0x14, 0x9a, 0xe3, 0xc0, 0xd9, 0xe6, 0x63, 0xa2, 0xb5, 0x87, 0x2e, 0xe6, 0x62, - 0xe1, 0x2e, 0xb5, 
0xd3, 0x1c, 0xaa, 0xd6, 0x03, 0x87, 0x7a, 0x05, 0xce, 0xb8, 0x9d, 0x23, 0xb3, - 0x1d, 0xb7, 0xd7, 0x63, 0xce, 0x44, 0xab, 0x33, 0x3f, 0x26, 0x1a, 0x38, 0x33, 0xdb, 0x0d, 0x11, - 0x77, 0x6f, 0x03, 0x7d, 0x04, 0xaa, 0x11, 0x61, 0xb3, 0x12, 0x8b, 0x48, 0xb8, 0x43, 0x1a, 0x12, - 0xb4, 0x72, 0xcc, 0x9b, 0xd5, 0x62, 0x41, 0x14, 0xc4, 0x58, 0x51, 0x44, 0xb7, 0x60, 0xa0, 0xe5, - 0x24, 0xee, 0xa6, 0x88, 0x7f, 0x3b, 0xb4, 0xd9, 0x5b, 0x11, 0x67, 0xb7, 0x14, 0x46, 0xc4, 0x3c, - 0x27, 0x82, 0x25, 0x35, 0xaa, 0x28, 0xb9, 0x61, 0xb3, 0x15, 0x06, 0x24, 0x48, 0x24, 0x07, 0x1f, - 0xe1, 0x57, 0x09, 0xb2, 0x14, 0x1b, 0x18, 0x68, 0x05, 0x4e, 0x31, 0xb3, 0xda, 0x0d, 0x2f, 0xd9, - 0x0c, 0xdb, 0x89, 0x3c, 0x25, 0x8e, 0x8f, 0xa4, 0x2f, 0x93, 0x16, 0x73, 0x70, 0x70, 0x6e, 0xcd, - 0xac, 0xec, 0x19, 0xbd, 0x37, 0xd9, 0x73, 0x62, 0x7f, 0xd9, 0x73, 0xf6, 0x7d, 0x30, 0xd6, 0xc1, - 0x34, 0x0e, 0x64, 0x3b, 0x9b, 0x83, 0x87, 0xf2, 0xb7, 0xe7, 0x81, 0x2c, 0x68, 0xff, 0x34, 0xe3, - 0x42, 0x6d, 0x9c, 0x26, 0x7a, 0xb0, 0xc6, 0x3a, 0x50, 0x26, 0xc1, 0xb6, 0x90, 0x56, 0x17, 0x0f, - 0xb7, 0x4a, 0x2e, 0x04, 0xdb, 0x9c, 0xbb, 0x30, 0x93, 0xd3, 0x85, 0x60, 0x1b, 0xd3, 0xb6, 0xd1, - 0x97, 0xac, 0x94, 0x36, 0xcc, 0x6d, 0xb8, 0x1f, 0x3a, 0x92, 0xe3, 0x53, 0xcf, 0x0a, 0xb2, 0xfd, - 0xef, 0x4a, 0x70, 0x6e, 0xbf, 0x46, 0x7a, 0x18, 0xbe, 0xc7, 0xa1, 0x3f, 0x66, 0x4e, 0x11, 0x82, - 0xfd, 0x0f, 0xd2, 0x5d, 0xc1, 0xdd, 0x24, 0x5e, 0xc1, 0x02, 0x84, 0x7c, 0x28, 0x37, 0x9d, 0x96, - 0x30, 0xed, 0x2d, 0x1c, 0x36, 0xd4, 0x8c, 0xfe, 0x77, 0xfc, 0x25, 0xa7, 0xc5, 0x97, 0xa7, 0x51, - 0x80, 0x29, 0x19, 0x94, 0x40, 0xc5, 0x89, 0x22, 0x47, 0xde, 0xc0, 0x5f, 0x29, 0x86, 0xde, 0x34, - 0x6d, 0x92, 0x5f, 0x60, 0xa6, 0x8a, 0x30, 0x27, 0x66, 0x7f, 0x6e, 0x20, 0x15, 0x97, 0xc4, 0xdc, - 0x2a, 0x62, 0xe8, 0x17, 0x16, 0x3d, 0xab, 0xe8, 0x08, 0x3f, 0x1e, 0xf8, 0xcb, 0x0e, 0xcb, 0x22, - 0x7d, 0x82, 0x20, 0x85, 0x3e, 0x6b, 0xb1, 0x24, 0x05, 0x32, 0xd8, 0x4b, 0x1c, 0x51, 0x8f, 0x26, - 0x67, 0x82, 0x99, 0xfa, 0x40, 0x16, 0x62, 0x93, 0xba, 
0x48, 0x36, 0xc2, 0x54, 0xf3, 0xce, 0x64, - 0x23, 0x4c, 0xd5, 0x96, 0x70, 0x74, 0x3b, 0xc7, 0x7d, 0xa2, 0x80, 0x40, 0xf7, 0x1e, 0x1c, 0x26, - 0xbe, 0x66, 0xc1, 0x98, 0x97, 0xbd, 0x07, 0x17, 0x07, 0xba, 0x1b, 0xc5, 0x98, 0xdf, 0x3a, 0xaf, - 0xd9, 0x95, 0xe2, 0xd0, 0x01, 0xc2, 0x9d, 0x9d, 0x41, 0x75, 0xe8, 0xf3, 0x82, 0x8d, 0x50, 0xa8, - 0x4b, 0x33, 0x87, 0xeb, 0xd4, 0x42, 0xb0, 0x11, 0xea, 0xdd, 0x4c, 0xff, 0x61, 0xd6, 0x3a, 0x5a, - 0x84, 0x53, 0x32, 0x34, 0xe5, 0x92, 0x17, 0x27, 0x61, 0xb4, 0xb3, 0xe8, 0x35, 0xbd, 0x84, 0xa9, - 0x3a, 0xe5, 0x99, 0x71, 0x2a, 0x89, 0x70, 0x0e, 0x1c, 0xe7, 0xd6, 0x42, 0xaf, 0xc1, 0x80, 0xbc, - 0x7b, 0xae, 0x16, 0x71, 0x38, 0xee, 0x5c, 0xff, 0x6a, 0x31, 0xad, 0x8a, 0xcb, 0x67, 0x49, 0xd0, - 0x7e, 0x7d, 0x10, 0x3a, 0xaf, 0xc8, 0xd3, 0xf7, 0xe1, 0xd6, 0x71, 0xdf, 0x87, 0xd3, 0xa3, 0x51, - 0xac, 0xaf, 0xb2, 0x0b, 0x58, 0xdb, 0x82, 0xaa, 0xbe, 0xa6, 0xdc, 0x09, 0x5c, 0xcc, 0x68, 0xa0, - 0x08, 0xfa, 0x37, 0x89, 0xe3, 0x27, 0x9b, 0xc5, 0xdc, 0xa8, 0x5c, 0x62, 0x6d, 0x65, 0xe3, 0xc9, - 0x78, 0x29, 0x16, 0x94, 0xd0, 0x6d, 0x18, 0xd8, 0xe4, 0x0b, 0x40, 0x9c, 0x56, 0x96, 0x0e, 0x3b, - 0xb8, 0xa9, 0x55, 0xa5, 0xa7, 0x5b, 0x14, 0x60, 0x49, 0x8e, 0xf9, 0x5e, 0x19, 0xde, 0x21, 0x7c, - 0xeb, 0x16, 0x17, 0x4a, 0xd7, 0xbb, 0x6b, 0xc8, 0x87, 0x61, 0x28, 0x22, 0x6e, 0x18, 0xb8, 0x9e, - 0x4f, 0xea, 0xd3, 0xf2, 0xb6, 0xe4, 0x20, 0x11, 0x54, 0xcc, 0x18, 0x81, 0x8d, 0x36, 0x70, 0xaa, - 0x45, 0xf4, 0x19, 0x0b, 0x46, 0x54, 0x54, 0x35, 0x9d, 0x10, 0x22, 0xac, 0xe2, 0x8b, 0x05, 0xc5, - 0x70, 0xb3, 0x36, 0x67, 0xd0, 0x9d, 0xdd, 0x89, 0x91, 0x74, 0x19, 0xce, 0xd0, 0x45, 0x2f, 0x01, - 0x84, 0xeb, 0xdc, 0xc1, 0x6a, 0x3a, 0x11, 0x26, 0xf2, 0x83, 0x7c, 0xea, 0x08, 0x8f, 0xc4, 0x94, - 0x2d, 0x60, 0xa3, 0x35, 0x74, 0x05, 0x80, 0x6f, 0x9b, 0xb5, 0x9d, 0x96, 0x3c, 0xd2, 0xc8, 0x10, - 0x38, 0x58, 0x55, 0x90, 0xbb, 0xbb, 0x13, 0x9d, 0x26, 0x4b, 0xe6, 0x45, 0x62, 0x54, 0x47, 0x3f, - 0x07, 0x03, 0x71, 0xbb, 0xd9, 0x74, 0x94, 0x01, 0xbd, 0xc0, 0xd8, 0x4e, 0xde, 0xae, 0xc1, 
0x8a, - 0x78, 0x01, 0x96, 0x14, 0xd1, 0x4d, 0xca, 0x54, 0x63, 0x61, 0x4b, 0x65, 0xbb, 0x88, 0xeb, 0x04, - 0xdc, 0x90, 0xf4, 0x2e, 0xa9, 0xe2, 0xe3, 0x1c, 0x9c, 0xbb, 0xbb, 0x13, 0x0f, 0xa5, 0xcb, 0x17, - 0x43, 0x11, 0x6d, 0x99, 0xdb, 0x26, 0xba, 0x2c, 0x93, 0x2c, 0xd1, 0xcf, 0x96, 0xb9, 0x3f, 0x9e, - 0xd4, 0x49, 0x96, 0x58, 0x71, 0xf7, 0x31, 0x33, 0x2b, 0xa3, 0x25, 0x38, 0xe9, 0x86, 0x41, 0x12, - 0x85, 0xbe, 0xcf, 0x93, 0x8c, 0xf1, 0xd3, 0x25, 0x37, 0xb0, 0xbf, 0x55, 0x74, 0xfb, 0xe4, 0x6c, - 0x27, 0x0a, 0xce, 0xab, 0x67, 0x07, 0xe9, 0xcb, 0x2e, 0x31, 0x38, 0xcf, 0xc2, 0x10, 0xb9, 0x9d, - 0x90, 0x28, 0x70, 0xfc, 0x6b, 0x78, 0x51, 0x9a, 0x96, 0xd9, 0x1e, 0xb8, 0x60, 0x94, 0xe3, 0x14, - 0x16, 0xb2, 0x95, 0x49, 0xc5, 0x88, 0x20, 0xe6, 0x26, 0x15, 0x69, 0x40, 0xb1, 0xbf, 0x59, 0x4e, - 0x29, 0x64, 0xf7, 0xe5, 0x6a, 0x8d, 0xa5, 0xaa, 0x91, 0x39, 0x7d, 0x18, 0x40, 0x1c, 0x34, 0x8a, - 0xa4, 0xac, 0x52, 0xd5, 0x2c, 0x9b, 0x84, 0x70, 0x9a, 0x2e, 0xda, 0x82, 0xca, 0x66, 0x18, 0x27, - 0xf2, 0xf8, 0x71, 0xc8, 0x93, 0xce, 0xa5, 0x30, 0x4e, 0x98, 0x16, 0xa1, 0x3e, 0x9b, 0x96, 0xc4, - 0x98, 0xd3, 0xa0, 0x67, 0xd0, 0x78, 0xd3, 0x89, 0xea, 0xf1, 0x2c, 0x8b, 0xf7, 0xef, 0x63, 0xea, - 0x83, 0x52, 0x16, 0x57, 0x35, 0x08, 0x9b, 0x78, 0xf6, 0x5f, 0x58, 0xa9, 0xfb, 0x87, 0x1b, 0xcc, - 0x79, 0x7b, 0x9b, 0x04, 0x94, 0x1b, 0x98, 0xee, 0x62, 0x3f, 0x95, 0x09, 0x85, 0x7d, 0x5b, 0xb7, - 0xd4, 0x7b, 0xb7, 0x68, 0x0b, 0x93, 0xac, 0x09, 0xc3, 0xb3, 0xec, 0xe3, 0x56, 0x3a, 0xa6, 0xb9, - 0x54, 0xc4, 0xb9, 0xc4, 0x8c, 0xeb, 0xdf, 0x37, 0x3c, 0xda, 0xfe, 0x92, 0x05, 0x03, 0x33, 0x8e, - 0xbb, 0x15, 0x6e, 0x6c, 0xa0, 0xa7, 0xa0, 0x5a, 0x6f, 0x47, 0x66, 0x78, 0xb5, 0xb2, 0x6c, 0xcc, - 0x89, 0x72, 0xac, 0x30, 0xe8, 0xd2, 0xdf, 0x70, 0x5c, 0x19, 0xdd, 0x5f, 0xe6, 0x4b, 0xff, 0x22, - 0x2b, 0xc1, 0x02, 0x42, 0x87, 0xbf, 0xe9, 0xdc, 0x96, 0x95, 0xb3, 0x97, 0x1f, 0x4b, 0x1a, 0x84, - 0x4d, 0x3c, 0xfb, 0x5f, 0x59, 0x30, 0x3e, 0xe3, 0xc4, 0x9e, 0x3b, 0xdd, 0x4e, 0x36, 0x67, 0xbc, - 0x64, 0xbd, 0xed, 0x6e, 0x91, 
0x84, 0x67, 0x81, 0xa0, 0xbd, 0x6c, 0xc7, 0x74, 0x07, 0xaa, 0xe3, - 0xa0, 0xea, 0xe5, 0x35, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x0d, 0x06, 0x5b, 0x4e, 0x1c, 0xdf, 0x0a, - 0xa3, 0x3a, 0x26, 0x1b, 0xc5, 0xe4, 0x89, 0x59, 0x25, 0x6e, 0x44, 0x12, 0x4c, 0x36, 0x84, 0xa3, - 0x80, 0x6e, 0x1f, 0x9b, 0xc4, 0xec, 0x5f, 0xb6, 0xe0, 0xd4, 0x0c, 0x71, 0x22, 0x12, 0xb1, 0xb4, - 0x32, 0xea, 0x43, 0xd0, 0xab, 0x50, 0x4d, 0x68, 0x09, 0xed, 0x91, 0x55, 0x6c, 0x8f, 0xd8, 0x15, - 0xff, 0x9a, 0x68, 0x1c, 0x2b, 0x32, 0xf6, 0x17, 0x2c, 0x38, 0x93, 0xd7, 0x97, 0x59, 0x3f, 0x6c, - 0xd7, 0xef, 0x47, 0x87, 0xfe, 0x96, 0x05, 0x43, 0xec, 0xda, 0x74, 0x8e, 0x24, 0x8e, 0xe7, 0x77, - 0xa4, 0xb4, 0xb3, 0x7a, 0x4c, 0x69, 0x77, 0x0e, 0xfa, 0x36, 0xc3, 0x26, 0xc9, 0x5e, 0xf9, 0x5f, - 0x0a, 0x9b, 0x04, 0x33, 0x08, 0x7a, 0x86, 0x2e, 0x42, 0x2f, 0x48, 0x1c, 0xba, 0x1d, 0xa5, 0xed, - 0x7b, 0x94, 0x2f, 0x40, 0x55, 0x8c, 0x4d, 0x1c, 0xfb, 0x5f, 0xd6, 0x60, 0x40, 0xf8, 0xa7, 0xf4, - 0x9c, 0x95, 0x44, 0x9a, 0x28, 0x4a, 0x5d, 0x4d, 0x14, 0x31, 0xf4, 0xbb, 0x2c, 0xb7, 0xa6, 0xd0, - 0x84, 0xaf, 0x14, 0xe2, 0xd0, 0xc4, 0xd3, 0x75, 0xea, 0x6e, 0xf1, 0xff, 0x58, 0x90, 0x42, 0x5f, - 0xb4, 0x60, 0xd4, 0x0d, 0x83, 0x80, 0xb8, 0x5a, 0x4d, 0xeb, 0x2b, 0xc2, 0x6f, 0x65, 0x36, 0xdd, - 0xa8, 0xbe, 0xb3, 0xcb, 0x00, 0x70, 0x96, 0x3c, 0x7a, 0x01, 0x86, 0xf9, 0x98, 0x5d, 0x4f, 0x19, - 0xec, 0x75, 0xa6, 0x33, 0x13, 0x88, 0xd3, 0xb8, 0x68, 0x92, 0x5f, 0x7c, 0x88, 0x9c, 0x62, 0xfd, - 0xda, 0xae, 0x69, 0x64, 0x13, 0x33, 0x30, 0x50, 0x04, 0x28, 0x22, 0x1b, 0x11, 0x89, 0x37, 0x85, - 0xff, 0x0e, 0x53, 0x11, 0x07, 0xee, 0x2d, 0x9f, 0x00, 0xee, 0x68, 0x09, 0xe7, 0xb4, 0x8e, 0xb6, - 0xc4, 0x19, 0xb9, 0x5a, 0x04, 0x3f, 0x17, 0xd3, 0xdc, 0xf5, 0xa8, 0x3c, 0x01, 0x15, 0x26, 0xba, - 0x98, 0x6a, 0x5a, 0xe6, 0x31, 0x6c, 0x4c, 0xb0, 0x61, 0x5e, 0x8e, 0xe6, 0xe0, 0x44, 0x26, 0x4f, - 0x5b, 0x2c, 0x0c, 0xeb, 0x2a, 0x5e, 0x29, 0x93, 0xe1, 0x2d, 0xc6, 0x1d, 0x35, 0x4c, 0xfb, 0xc9, - 0xe0, 0x3e, 0xf6, 0x93, 0x1d, 0xe5, 0x25, 0xca, 0x4d, 0xde, 0x2f, 
0x16, 0x32, 0x00, 0x3d, 0xb9, - 0x84, 0x7e, 0x3e, 0xe3, 0x12, 0x3a, 0xcc, 0x3a, 0x70, 0xbd, 0x98, 0x0e, 0x1c, 0xdc, 0xff, 0xf3, - 0x7e, 0xfa, 0x73, 0xfe, 0x4f, 0x0b, 0xe4, 0xbc, 0xce, 0x3a, 0xee, 0x26, 0xa1, 0x4b, 0x06, 0xbd, - 0x17, 0x46, 0x94, 0x15, 0x80, 0xab, 0x44, 0x16, 0x5b, 0x35, 0xea, 0x72, 0x1f, 0xa7, 0xa0, 0x38, - 0x83, 0x8d, 0xa6, 0xa0, 0x46, 0xc7, 0x89, 0x57, 0xe5, 0x72, 0x5f, 0x59, 0x1a, 0xa6, 0x57, 0x16, - 0x44, 0x2d, 0x8d, 0x83, 0x42, 0x18, 0xf3, 0x9d, 0x38, 0x61, 0x3d, 0x58, 0xdd, 0x09, 0xdc, 0x7b, - 0xcc, 0xe6, 0xc1, 0x82, 0x62, 0x16, 0xb3, 0x0d, 0xe1, 0xce, 0xb6, 0xed, 0xef, 0xf6, 0xc1, 0x70, - 0x8a, 0x33, 0x1e, 0x50, 0x61, 0x78, 0x0a, 0xaa, 0x52, 0x86, 0x67, 0xd3, 0x16, 0x29, 0x41, 0xaf, - 0x30, 0xa8, 0xd0, 0x5a, 0xd7, 0x52, 0x35, 0xab, 0xe0, 0x18, 0x02, 0x17, 0x9b, 0x78, 0x8c, 0x29, - 0x27, 0x7e, 0x3c, 0xeb, 0x7b, 0x24, 0x48, 0x78, 0x37, 0x8b, 0x61, 0xca, 0x6b, 0x8b, 0xab, 0x66, - 0xa3, 0x9a, 0x29, 0x67, 0x00, 0x38, 0x4b, 0x1e, 0x7d, 0xca, 0x82, 0x61, 0xe7, 0x56, 0xac, 0x13, - 0x40, 0x0b, 0xe7, 0xcf, 0x43, 0x0a, 0xa9, 0x54, 0x4e, 0x69, 0x6e, 0xb5, 0x4e, 0x15, 0xe1, 0x34, - 0x51, 0xf4, 0x86, 0x05, 0x88, 0xdc, 0x26, 0xae, 0x74, 0x4f, 0x15, 0x7d, 0xe9, 0x2f, 0xe2, 0xb0, - 0x7c, 0xa1, 0xa3, 0x5d, 0xce, 0xd5, 0x3b, 0xcb, 0x71, 0x4e, 0x1f, 0xec, 0x7f, 0x51, 0x56, 0x1b, - 0x4a, 0x7b, 0x44, 0x3b, 0x86, 0x67, 0xa6, 0x75, 0xef, 0x9e, 0x99, 0xda, 0xb3, 0xa4, 0x33, 0x48, - 0x38, 0x15, 0x53, 0x58, 0xba, 0x4f, 0x31, 0x85, 0xbf, 0x60, 0xa5, 0x12, 0x74, 0x0d, 0x9e, 0x7f, - 0xa9, 0x58, 0x6f, 0xec, 0x49, 0xee, 0xf5, 0x92, 0xe1, 0xee, 0x69, 0x67, 0x27, 0xca, 0x4d, 0x0d, - 0xb4, 0x03, 0x71, 0xc3, 0xff, 0x50, 0x86, 0x41, 0x43, 0x92, 0xe6, 0xaa, 0x45, 0xd6, 0x03, 0xa6, - 0x16, 0x95, 0x0e, 0xa0, 0x16, 0xfd, 0x3c, 0xd4, 0x5c, 0xc9, 0xe5, 0x8b, 0x49, 0x21, 0x9e, 0x95, - 0x1d, 0x9a, 0xd1, 0xab, 0x22, 0xac, 0x69, 0xa2, 0xf9, 0x54, 0x24, 0x5a, 0xea, 0xbc, 0x9d, 0x17, - 0x2a, 0x26, 0x24, 0x45, 0x67, 0x9d, 0xec, 0xfd, 0x6f, 0xa5, 0x07, 0xdf, 0xa3, 0xef, 0x5a, 0x6a, - 0x72, 
0x8f, 0x21, 0xe5, 0xc8, 0xcd, 0x74, 0xca, 0x91, 0x0b, 0x85, 0x0c, 0x73, 0x97, 0x5c, 0x23, - 0x57, 0x61, 0x60, 0x36, 0x6c, 0x36, 0x9d, 0xa0, 0x8e, 0x7e, 0x02, 0x06, 0x5c, 0xfe, 0x53, 0xd8, - 0xa6, 0xd8, 0x0d, 0xa7, 0x80, 0x62, 0x09, 0x43, 0x8f, 0x40, 0x9f, 0x13, 0x35, 0xa4, 0x3d, 0x8a, - 0x79, 0x22, 0x4d, 0x47, 0x8d, 0x18, 0xb3, 0x52, 0xfb, 0x9f, 0xf4, 0x01, 0x73, 0x00, 0x70, 0x22, - 0x52, 0x5f, 0x0b, 0x59, 0xe6, 0xcf, 0x23, 0xbd, 0x17, 0xd4, 0x87, 0xa5, 0x07, 0xf9, 0x6e, 0xd0, - 0xb8, 0x1f, 0x2a, 0x1f, 0xf3, 0xfd, 0x50, 0x97, 0x2b, 0xbf, 0xbe, 0x07, 0xe8, 0xca, 0xcf, 0xfe, - 0x9c, 0x05, 0x48, 0x79, 0x8d, 0xe8, 0x3b, 0xf9, 0x29, 0xa8, 0x29, 0xff, 0x11, 0xa1, 0x58, 0x69, - 0x16, 0x21, 0x01, 0x58, 0xe3, 0xf4, 0x70, 0x42, 0x7e, 0x5c, 0xf2, 0xef, 0x72, 0xda, 0xbf, 0x9a, - 0x71, 0x7d, 0xc1, 0xce, 0xed, 0xdf, 0x2b, 0xc1, 0x43, 0x5c, 0x24, 0x2f, 0x39, 0x81, 0xd3, 0x20, - 0x4d, 0xda, 0xab, 0x5e, 0xbd, 0x2c, 0x5c, 0x7a, 0x34, 0xf3, 0xa4, 0xbf, 0xf4, 0x61, 0xf7, 0x2e, - 0xdf, 0x73, 0x7c, 0x97, 0x2d, 0x04, 0x5e, 0x82, 0x59, 0xe3, 0x28, 0x86, 0xaa, 0x7c, 0x5f, 0x43, - 0xf0, 0xe2, 0x82, 0x08, 0x29, 0xb6, 0x24, 0xe4, 0x26, 0xc1, 0x8a, 0x10, 0x55, 0x5c, 0xfd, 0xd0, - 0xdd, 0xc2, 0xa4, 0x15, 0x32, 0xbe, 0x6b, 0xb8, 0xab, 0x2e, 0x8a, 0x72, 0xac, 0x30, 0xec, 0x26, - 0x8c, 0xca, 0x31, 0x6c, 0x5d, 0x21, 0x3b, 0x98, 0x6c, 0x50, 0xf9, 0xe3, 0xca, 0x22, 0xe3, 0xc9, - 0x0f, 0x25, 0x7f, 0x66, 0x4d, 0x20, 0x4e, 0xe3, 0xca, 0x64, 0xa0, 0xa5, 0xfc, 0x64, 0xa0, 0xf6, - 0xef, 0x59, 0x90, 0x15, 0x80, 0x46, 0xea, 0x43, 0x6b, 0xcf, 0xd4, 0x87, 0x07, 0x48, 0x1e, 0xf8, - 0xb3, 0x30, 0xe8, 0x24, 0x54, 0x67, 0xe1, 0xa7, 0xfc, 0xf2, 0xbd, 0x5d, 0x04, 0x2d, 0x85, 0x75, - 0x6f, 0xc3, 0x63, 0xa7, 0x7b, 0xb3, 0x39, 0xfb, 0xaf, 0xfa, 0x60, 0xac, 0x23, 0x98, 0x09, 0x3d, - 0x0f, 0x43, 0x6a, 0x28, 0xa4, 0xfd, 0xac, 0x66, 0xba, 0x2c, 0x6a, 0x18, 0x4e, 0x61, 0xf6, 0xb0, - 0x1f, 0x16, 0xe0, 0x64, 0x44, 0x5e, 0x6d, 0x93, 0x36, 0x99, 0xde, 0x48, 0x48, 0xb4, 0x4a, 0xdc, - 0x30, 0xa8, 0xf3, 0x04, 0x9d, 0xe5, 0x99, 
0x87, 0xef, 0xec, 0x4e, 0x9c, 0xc4, 0x9d, 0x60, 0x9c, - 0x57, 0x07, 0xb5, 0x60, 0xd8, 0x37, 0x55, 0x4e, 0x71, 0xde, 0xb8, 0x27, 0x6d, 0x55, 0x2d, 0x89, - 0x54, 0x31, 0x4e, 0x13, 0x48, 0xeb, 0xad, 0x95, 0xfb, 0xa4, 0xb7, 0x7e, 0x52, 0xeb, 0xad, 0xdc, - 0x63, 0xe1, 0x83, 0x05, 0x07, 0xb3, 0x1d, 0xb5, 0xe2, 0xfa, 0x22, 0x54, 0xa5, 0x37, 0x57, 0x4f, - 0x5e, 0x50, 0x66, 0x3b, 0x5d, 0x18, 0xe8, 0x13, 0xf0, 0xe3, 0x17, 0xa2, 0xc8, 0x18, 0xcc, 0xab, - 0x61, 0x32, 0xed, 0xfb, 0xe1, 0x2d, 0xaa, 0x13, 0x5c, 0x8b, 0x89, 0x30, 0xe8, 0xd8, 0x77, 0x4b, - 0x90, 0x73, 0x36, 0xa2, 0xfb, 0x51, 0x2b, 0x22, 0xa9, 0xfd, 0x78, 0x30, 0x65, 0x04, 0xdd, 0xe6, - 0x1e, 0x6f, 0x5c, 0xe4, 0x7e, 0xa0, 0xe8, 0xb3, 0x9d, 0x76, 0x82, 0x53, 0xec, 0x48, 0x39, 0xc2, - 0x9d, 0x07, 0xd0, 0xfa, 0xa3, 0x88, 0xb0, 0x50, 0x17, 0xea, 0x5a, 0xcd, 0xc4, 0x06, 0x16, 0x3d, - 0xea, 0x7b, 0x41, 0x9c, 0x38, 0xbe, 0x7f, 0xc9, 0x0b, 0x12, 0x61, 0xb3, 0x54, 0xba, 0xc5, 0x82, - 0x06, 0x61, 0x13, 0xef, 0xec, 0xbb, 0x8c, 0xf9, 0x3b, 0xc8, 0xbc, 0x6f, 0xc2, 0x99, 0x79, 0x2f, - 0x51, 0x71, 0x41, 0x6a, 0xbd, 0x51, 0xf5, 0x50, 0xc5, 0xb9, 0x59, 0x5d, 0xe3, 0xdc, 0x8c, 0xb8, - 0x9c, 0x52, 0x3a, 0x8c, 0x28, 0x1b, 0x97, 0x63, 0x3f, 0x0f, 0xa7, 0xe6, 0xbd, 0xe4, 0xa2, 0xe7, - 0x93, 0x03, 0x12, 0xb1, 0x7f, 0xb7, 0x1f, 0x86, 0xcc, 0x08, 0xd7, 0x83, 0x84, 0xea, 0x7d, 0x81, - 0x6a, 0x80, 0xe2, 0xeb, 0x3c, 0x75, 0x1d, 0x79, 0xe3, 0xd0, 0xe1, 0xb6, 0xf9, 0x23, 0x66, 0x28, - 0x81, 0x9a, 0x26, 0x36, 0x3b, 0x80, 0x6e, 0x41, 0x65, 0x83, 0xc5, 0x8d, 0x94, 0x8b, 0xf0, 0xd9, - 0xc8, 0x1b, 0x51, 0xbd, 0x1d, 0x79, 0xe4, 0x09, 0xa7, 0x47, 0x05, 0x77, 0x94, 0x0e, 0x46, 0x34, - 0x1c, 0x8a, 0x45, 0x18, 0xa2, 0xc2, 0xe8, 0x26, 0x12, 0x2a, 0xf7, 0x20, 0x12, 0x52, 0x0c, 0xba, - 0xff, 0x3e, 0x31, 0x68, 0x16, 0x03, 0x94, 0x6c, 0x32, 0xb5, 0x52, 0x44, 0x40, 0x0c, 0xb0, 0x41, - 0x30, 0x62, 0x80, 0x52, 0x60, 0x9c, 0xc5, 0x47, 0x1f, 0x53, 0x2c, 0xbe, 0x5a, 0x84, 0xb9, 0xd7, - 0x5c, 0xd1, 0x47, 0xcd, 0xdd, 0x3f, 0x57, 0x82, 0x91, 0xf9, 0xa0, 0xbd, 0x32, 
0xbf, 0xd2, 0x5e, - 0xf7, 0x3d, 0xf7, 0x0a, 0xd9, 0xa1, 0x2c, 0x7c, 0x8b, 0xec, 0x2c, 0xcc, 0x89, 0x1d, 0xa4, 0xd6, - 0xcc, 0x15, 0x5a, 0x88, 0x39, 0x8c, 0x32, 0xa3, 0x0d, 0x2f, 0x68, 0x90, 0xa8, 0x15, 0x79, 0xc2, - 0x12, 0x6b, 0x30, 0xa3, 0x8b, 0x1a, 0x84, 0x4d, 0x3c, 0xda, 0x76, 0x78, 0x2b, 0x20, 0x51, 0x56, - 0xbf, 0x5e, 0xa6, 0x85, 0x98, 0xc3, 0x28, 0x52, 0x12, 0xb5, 0xe3, 0x44, 0x2c, 0x46, 0x85, 0xb4, - 0x46, 0x0b, 0x31, 0x87, 0xd1, 0x9d, 0x1e, 0xb7, 0xd7, 0x99, 0x4b, 0x4c, 0x26, 0xdc, 0x62, 0x95, - 0x17, 0x63, 0x09, 0xa7, 0xa8, 0x5b, 0x64, 0x67, 0x8e, 0x1e, 0xc6, 0x33, 0x01, 0x61, 0x57, 0x78, - 0x31, 0x96, 0x70, 0x96, 0x42, 0x34, 0x3d, 0x1c, 0x3f, 0x74, 0x29, 0x44, 0xd3, 0xdd, 0xef, 0x72, - 0xac, 0xff, 0x75, 0x0b, 0x86, 0x4c, 0x47, 0x36, 0xd4, 0xc8, 0xe8, 0xc2, 0xcb, 0x1d, 0x19, 0xa8, - 0xdf, 0x93, 0xf7, 0x3a, 0x63, 0xc3, 0x4b, 0xc2, 0x56, 0xfc, 0x34, 0x09, 0x1a, 0x5e, 0x40, 0x98, - 0xa3, 0x01, 0x77, 0x80, 0x4b, 0x79, 0xc9, 0xcd, 0x86, 0x75, 0x72, 0x0f, 0xca, 0xb4, 0x7d, 0x03, - 0xc6, 0x3a, 0xa2, 0x00, 0x7b, 0x50, 0x41, 0xf6, 0x8d, 0xc1, 0xb6, 0x31, 0x0c, 0xd2, 0x86, 0x65, - 0x1a, 0xab, 0x59, 0x18, 0xe3, 0x1b, 0x89, 0x52, 0x5a, 0x75, 0x37, 0x49, 0x53, 0x45, 0x76, 0x32, - 0xb3, 0xff, 0xf5, 0x2c, 0x10, 0x77, 0xe2, 0xdb, 0x9f, 0xb7, 0x60, 0x38, 0x15, 0x98, 0x59, 0x90, - 0xb2, 0xc4, 0x76, 0x5a, 0xc8, 0xfc, 0x2a, 0x99, 0x73, 0x79, 0x99, 0x09, 0x53, 0xbd, 0xd3, 0x34, - 0x08, 0x9b, 0x78, 0xf6, 0x97, 0x4a, 0x50, 0x95, 0xbe, 0x29, 0x3d, 0x74, 0xe5, 0xb3, 0x16, 0x0c, - 0xab, 0xab, 0x16, 0x66, 0xc3, 0x2b, 0x15, 0x11, 0xaa, 0x42, 0x7b, 0xa0, 0xac, 0x00, 0xc1, 0x46, - 0xa8, 0x35, 0x77, 0x6c, 0x12, 0xc3, 0x69, 0xda, 0xe8, 0x3a, 0x40, 0xbc, 0x13, 0x27, 0xa4, 0x69, - 0x58, 0x13, 0x6d, 0x63, 0xc7, 0x4d, 0xba, 0x61, 0x44, 0xe8, 0xfe, 0xba, 0x1a, 0xd6, 0xc9, 0xaa, - 0xc2, 0xd4, 0x2a, 0x94, 0x2e, 0xc3, 0x46, 0x4b, 0xf6, 0x3f, 0x2a, 0xc1, 0x89, 0x6c, 0x97, 0xd0, - 0x07, 0x61, 0x48, 0x52, 0x37, 0x4e, 0x9d, 0xd2, 0xb3, 0x66, 0x08, 0x1b, 0xb0, 0xbb, 0xbb, 0x13, - 0x13, 0x9d, 0x2f, 
0x7d, 0x4e, 0x9a, 0x28, 0x38, 0xd5, 0x18, 0xbf, 0xef, 0x12, 0x17, 0xb3, 0x33, - 0x3b, 0xd3, 0xad, 0x96, 0xb8, 0xb4, 0x32, 0xee, 0xbb, 0x4c, 0x28, 0xce, 0x60, 0xa3, 0x15, 0x38, - 0x65, 0x94, 0x5c, 0x25, 0x5e, 0x63, 0x73, 0x3d, 0x8c, 0xe4, 0x09, 0xec, 0x11, 0xed, 0x32, 0xd7, - 0x89, 0x83, 0x73, 0x6b, 0x52, 0x69, 0xef, 0x3a, 0x2d, 0xc7, 0xf5, 0x92, 0x1d, 0x61, 0x1e, 0x55, - 0xbc, 0x69, 0x56, 0x94, 0x63, 0x85, 0x61, 0x2f, 0x41, 0x5f, 0x8f, 0x2b, 0xa8, 0x27, 0xcd, 0xff, - 0x45, 0xa8, 0xd2, 0xe6, 0xa4, 0x7a, 0x57, 0x44, 0x93, 0x21, 0x54, 0xe5, 0xbb, 0x49, 0xc8, 0x86, - 0xb2, 0xe7, 0xc8, 0x2b, 0x45, 0xf5, 0x59, 0x0b, 0x71, 0xdc, 0x66, 0x87, 0x69, 0x0a, 0x44, 0x8f, - 0x43, 0x99, 0xdc, 0x6e, 0x65, 0xef, 0x0e, 0x2f, 0xdc, 0x6e, 0x79, 0x11, 0x89, 0x29, 0x12, 0xb9, - 0xdd, 0x42, 0x67, 0xa1, 0xe4, 0xd5, 0x85, 0x90, 0x02, 0x81, 0x53, 0x5a, 0x98, 0xc3, 0x25, 0xaf, - 0x6e, 0xdf, 0x86, 0x9a, 0x7a, 0xa8, 0x09, 0x6d, 0x49, 0xde, 0x6d, 0x15, 0xe1, 0x4c, 0x26, 0xdb, - 0xed, 0xc2, 0xb5, 0xdb, 0x00, 0x3a, 0x0c, 0xb4, 0x28, 0xfe, 0x72, 0x0e, 0xfa, 0xdc, 0x50, 0x44, - 0xcf, 0x57, 0x75, 0x33, 0x8c, 0x69, 0x33, 0x88, 0x7d, 0x03, 0x46, 0xae, 0x04, 0xe1, 0x2d, 0xf6, - 0x9e, 0x02, 0x4b, 0x1f, 0x48, 0x1b, 0xde, 0xa0, 0x3f, 0xb2, 0x2a, 0x02, 0x83, 0x62, 0x0e, 0x53, - 0x89, 0xcd, 0x4a, 0xdd, 0x12, 0x9b, 0xd9, 0x1f, 0xb7, 0x60, 0x48, 0xc5, 0x93, 0xcd, 0x6f, 0x6f, - 0xd1, 0x76, 0x1b, 0x51, 0xd8, 0x6e, 0x65, 0xdb, 0x65, 0x6f, 0xc2, 0x61, 0x0e, 0x33, 0x03, 0x2d, - 0x4b, 0xfb, 0x04, 0x5a, 0x9e, 0x83, 0xbe, 0x2d, 0x2f, 0xa8, 0x67, 0xdf, 0x06, 0xba, 0xe2, 0x05, - 0x75, 0xcc, 0x20, 0xb4, 0x0b, 0x27, 0x54, 0x17, 0xa4, 0x40, 0x78, 0x1e, 0x86, 0xd6, 0xdb, 0x9e, - 0x5f, 0x97, 0x79, 0x11, 0x33, 0x16, 0x95, 0x19, 0x03, 0x86, 0x53, 0x98, 0xf4, 0x5c, 0xb7, 0xee, - 0x05, 0x4e, 0xb4, 0xb3, 0xa2, 0x25, 0x90, 0x62, 0x4a, 0x33, 0x0a, 0x82, 0x0d, 0x2c, 0xfb, 0xf5, - 0x32, 0x8c, 0xa4, 0xa3, 0xea, 0x7a, 0x38, 0x5e, 0x3d, 0x0e, 0x15, 0x16, 0x68, 0x97, 0x9d, 0x5a, - 0x9e, 0x4a, 0x90, 0xc3, 0x50, 0x0c, 0xfd, 0x3c, 0x7b, 
0x48, 0x31, 0xef, 0x6a, 0xa9, 0x4e, 0x2a, - 0x3b, 0x0c, 0x73, 0xb9, 0x13, 0x09, 0x4b, 0x04, 0x29, 0xf4, 0x29, 0x0b, 0x06, 0xc2, 0x96, 0x99, - 0x10, 0xeb, 0x03, 0x45, 0x46, 0x1c, 0x8a, 0x30, 0x24, 0xa1, 0x11, 0xab, 0xa9, 0x97, 0xd3, 0x21, - 0x49, 0x9f, 0x7d, 0x37, 0x0c, 0x99, 0x98, 0xfb, 0x29, 0xc5, 0x55, 0x53, 0x29, 0xfe, 0xac, 0xb9, - 0x28, 0x44, 0x4c, 0x65, 0x0f, 0xdb, 0xed, 0x1a, 0x54, 0x5c, 0xe5, 0x97, 0x70, 0x4f, 0xd9, 0x74, - 0x55, 0x3a, 0x0f, 0x76, 0x37, 0xc5, 0x5b, 0xb3, 0xbf, 0x6b, 0x19, 0xeb, 0x03, 0x93, 0x78, 0xa1, - 0x8e, 0x22, 0x28, 0x37, 0xb6, 0xb7, 0x84, 0x2a, 0x7a, 0xb9, 0xa0, 0xe1, 0x9d, 0xdf, 0xde, 0xd2, - 0x6b, 0xdc, 0x2c, 0xc5, 0x94, 0x58, 0x0f, 0xc6, 0xc2, 0x54, 0xe8, 0x6d, 0x79, 0xff, 0xd0, 0x5b, - 0xfb, 0x8d, 0x12, 0x8c, 0x75, 0x2c, 0x2a, 0xf4, 0x1a, 0x54, 0x22, 0xfa, 0x95, 0xe2, 0xf3, 0x16, - 0x0b, 0x0b, 0x96, 0x8d, 0x17, 0xea, 0x5a, 0xee, 0xa6, 0xcb, 0x31, 0x27, 0x89, 0x2e, 0x03, 0xd2, - 0xde, 0x33, 0xca, 0x52, 0xc9, 0x3f, 0xf9, 0xac, 0xa8, 0x8a, 0xa6, 0x3b, 0x30, 0x70, 0x4e, 0x2d, - 0xf4, 0x42, 0xd6, 0xe0, 0x59, 0x4e, 0x9b, 0xb3, 0xf7, 0xb2, 0x5d, 0xda, 0xbf, 0x5d, 0x82, 0xe1, - 0x54, 0x7e, 0x32, 0xe4, 0x43, 0x95, 0xf8, 0xec, 0xae, 0x41, 0x0a, 0x9b, 0xc3, 0x66, 0x1b, 0x57, - 0x02, 0xf2, 0x82, 0x68, 0x17, 0x2b, 0x0a, 0x0f, 0xc6, 0x9d, 0xff, 0xf3, 0x30, 0x24, 0x3b, 0xf4, - 0x01, 0xa7, 0xe9, 0x8b, 0x01, 0x54, 0x6b, 0xf4, 0x82, 0x01, 0xc3, 0x29, 0x4c, 0xfb, 0xf7, 0xcb, - 0x30, 0xce, 0x2f, 0x67, 0xea, 0x6a, 0xe5, 0x2d, 0xc9, 0xf3, 0xd6, 0x5f, 0xd3, 0x59, 0x04, 0xad, - 0x22, 0x9e, 0xd4, 0xec, 0x46, 0xa8, 0x27, 0x87, 0xb1, 0xaf, 0x66, 0x1c, 0xc6, 0xb8, 0xda, 0xdd, - 0x38, 0xa2, 0x1e, 0xfd, 0x70, 0x79, 0x90, 0xfd, 0xfd, 0x12, 0x8c, 0x66, 0x5e, 0x4e, 0x41, 0xaf, - 0xa7, 0x93, 0x6d, 0x5b, 0x45, 0xd8, 0xd4, 0xf7, 0x7c, 0x4c, 0xe3, 0x60, 0x29, 0xb7, 0xef, 0xd3, - 0x56, 0xb1, 0xbf, 0x53, 0x82, 0x91, 0xf4, 0x93, 0x2f, 0x0f, 0xe0, 0x48, 0xbd, 0x03, 0x6a, 0xec, - 0x55, 0x03, 0xf6, 0x52, 0x31, 0x37, 0xc9, 0xf3, 0x04, 0xf2, 0xb2, 0x10, 0x6b, 0xf8, 0x03, 
0x91, - 0xc9, 0xdc, 0xfe, 0x87, 0x16, 0x9c, 0xe6, 0x5f, 0x99, 0x5d, 0x87, 0x7f, 0x3d, 0x6f, 0x74, 0x5f, - 0x2e, 0xb6, 0x83, 0x99, 0xec, 0x97, 0xfb, 0x8d, 0x2f, 0x7b, 0x58, 0x54, 0xf4, 0x36, 0xbd, 0x14, - 0x1e, 0xc0, 0xce, 0x1e, 0x68, 0x31, 0xd8, 0xdf, 0x29, 0x83, 0x7e, 0x4b, 0x15, 0x79, 0x22, 0x7a, - 0xb4, 0x90, 0x2c, 0xa0, 0xab, 0x3b, 0x81, 0xab, 0x5f, 0x6d, 0xad, 0x66, 0x82, 0x47, 0x7f, 0xc9, - 0x82, 0x41, 0x2f, 0xf0, 0x12, 0xcf, 0x61, 0xc7, 0xe8, 0x62, 0x1e, 0x44, 0x54, 0xe4, 0x16, 0x78, - 0xcb, 0x61, 0x64, 0xde, 0xe3, 0x28, 0x62, 0xd8, 0xa4, 0x8c, 0x3e, 0x2c, 0x7c, 0xba, 0xcb, 0x85, - 0xc5, 0x3d, 0x57, 0x33, 0x8e, 0xdc, 0x2d, 0xaa, 0x78, 0x25, 0x51, 0x41, 0xe9, 0x02, 0x30, 0x6d, - 0x4a, 0x25, 0x94, 0xd6, 0xcf, 0xf3, 0xd3, 0x62, 0xcc, 0x09, 0xd9, 0x31, 0xa0, 0xce, 0xb1, 0x38, - 0xa0, 0xbf, 0xec, 0x14, 0xd4, 0x9c, 0x76, 0x12, 0x36, 0xe9, 0x30, 0x89, 0xab, 0x26, 0xed, 0x11, - 0x2c, 0x01, 0x58, 0xe3, 0xd8, 0xaf, 0x57, 0x20, 0x13, 0xce, 0x89, 0x6e, 0x9b, 0xef, 0x00, 0x5b, - 0xc5, 0xbe, 0x03, 0xac, 0x3a, 0x93, 0xf7, 0x16, 0x30, 0x6a, 0x40, 0xa5, 0xb5, 0xe9, 0xc4, 0x52, - 0xad, 0x7e, 0x51, 0x9d, 0xe3, 0x68, 0xe1, 0xdd, 0xdd, 0x89, 0x9f, 0xe9, 0xcd, 0xea, 0x4a, 0xd7, - 0xea, 0x14, 0x4f, 0x41, 0xa3, 0x49, 0xb3, 0x36, 0x30, 0x6f, 0xff, 0x20, 0x4f, 0x42, 0x7e, 0x42, - 0x3c, 0xdf, 0x80, 0x49, 0xdc, 0xf6, 0x13, 0xb1, 0x1a, 0x5e, 0x2c, 0x70, 0x97, 0xf1, 0x86, 0x75, - 0x22, 0x02, 0xfe, 0x1f, 0x1b, 0x44, 0xd1, 0x07, 0xa1, 0x16, 0x27, 0x4e, 0x94, 0xdc, 0x63, 0xe8, - 0xb0, 0x1a, 0xf4, 0x55, 0xd9, 0x08, 0xd6, 0xed, 0xa1, 0x97, 0x58, 0x52, 0x64, 0x2f, 0xde, 0xbc, - 0xc7, 0x50, 0x0c, 0x99, 0x40, 0x59, 0xb4, 0x80, 0x8d, 0xd6, 0xd0, 0x79, 0x00, 0xb6, 0xb6, 0xb9, - 0xff, 0x61, 0x95, 0x59, 0x99, 0x14, 0x2b, 0xc4, 0x0a, 0x82, 0x0d, 0x2c, 0xfb, 0x27, 0x21, 0x9d, - 0x49, 0x03, 0x4d, 0xc8, 0xc4, 0x1d, 0xdc, 0x0a, 0xcd, 0x42, 0x2a, 0x52, 0x39, 0x36, 0x7e, 0xd3, - 0x02, 0x33, 0xdd, 0x07, 0x7a, 0x95, 0xe7, 0x15, 0xb1, 0x8a, 0xb8, 0x39, 0x34, 0xda, 0x9d, 0x5c, - 0x72, 0x5a, 0x99, 0x2b, 0x6c, 
0x99, 0x5c, 0xe4, 0xec, 0xbb, 0xa0, 0x2a, 0xa1, 0x07, 0x52, 0xea, - 0x3e, 0x06, 0x27, 0x65, 0x78, 0xa6, 0xb4, 0x9b, 0x8a, 0x5b, 0xa7, 0xfd, 0x4d, 0x3f, 0xd2, 0x9e, - 0x53, 0xea, 0x66, 0xcf, 0xe9, 0xe1, 0x35, 0xe8, 0xdf, 0xb2, 0xe0, 0x5c, 0xb6, 0x03, 0xf1, 0x52, - 0x18, 0x78, 0x49, 0x18, 0xad, 0x92, 0x24, 0xf1, 0x82, 0x06, 0x4b, 0xa7, 0x76, 0xcb, 0x89, 0x64, - 0xb6, 0x7a, 0xc6, 0x28, 0x6f, 0x38, 0x51, 0x80, 0x59, 0x29, 0xda, 0x81, 0x7e, 0xee, 0xa4, 0x26, - 0xb4, 0xf5, 0x43, 0xee, 0x8d, 0x9c, 0xe1, 0xd0, 0xc7, 0x05, 0xee, 0x20, 0x87, 0x05, 0x41, 0xfb, - 0xfb, 0x16, 0xa0, 0xe5, 0x6d, 0x12, 0x45, 0x5e, 0xdd, 0x70, 0xab, 0x63, 0xcf, 0x20, 0x19, 0xcf, - 0x1d, 0x99, 0xc1, 0xc3, 0x99, 0x67, 0x90, 0x8c, 0x7f, 0xf9, 0xcf, 0x20, 0x95, 0x0e, 0xf6, 0x0c, - 0x12, 0x5a, 0x86, 0xd3, 0x4d, 0x7e, 0xdc, 0xe0, 0x4f, 0x8b, 0xf0, 0xb3, 0x87, 0x8a, 0x73, 0x3b, - 0x73, 0x67, 0x77, 0xe2, 0xf4, 0x52, 0x1e, 0x02, 0xce, 0xaf, 0x67, 0xbf, 0x0b, 0x10, 0xf7, 0xa6, - 0x9b, 0xcd, 0xf3, 0x55, 0xea, 0x6a, 0x7e, 0xb1, 0xbf, 0x52, 0x81, 0xd1, 0x4c, 0x2e, 0x63, 0x7a, - 0xd4, 0xeb, 0x74, 0x8e, 0x3a, 0xb4, 0xfc, 0xee, 0xec, 0x5e, 0x4f, 0xee, 0x56, 0x01, 0x54, 0xbc, - 0xa0, 0xd5, 0x4e, 0x8a, 0x09, 0xb3, 0xe5, 0x9d, 0x58, 0xa0, 0x0d, 0x1a, 0xe6, 0x62, 0xfa, 0x17, - 0x73, 0x32, 0x45, 0x3a, 0x6f, 0xa5, 0x94, 0xf1, 0xbe, 0xfb, 0x64, 0x0e, 0xf8, 0x84, 0x76, 0xa5, - 0xaa, 0x14, 0x61, 0x58, 0xcc, 0x2c, 0x96, 0xa3, 0xbe, 0x6a, 0xff, 0x66, 0x09, 0x06, 0x8d, 0x49, - 0x43, 0xbf, 0x96, 0x4e, 0x86, 0x65, 0x15, 0xf7, 0x49, 0xac, 0xfd, 0x49, 0x9d, 0xee, 0x8a, 0x7f, - 0xd2, 0x13, 0x9d, 0x79, 0xb0, 0xee, 0xee, 0x4e, 0x9c, 0xc8, 0x64, 0xba, 0x4a, 0xe5, 0xc6, 0x3a, - 0xfb, 0x51, 0x18, 0xcd, 0x34, 0x93, 0xf3, 0xc9, 0x6b, 0xe6, 0x27, 0x1f, 0xda, 0x2c, 0x65, 0x0e, - 0xd9, 0x37, 0xe8, 0x90, 0x89, 0xe8, 0xbe, 0xd0, 0x27, 0x3d, 0xd8, 0x60, 0x33, 0x41, 0xbc, 0xa5, - 0x1e, 0x83, 0x78, 0x9f, 0x84, 0x6a, 0x2b, 0xf4, 0x3d, 0xd7, 0x53, 0xb9, 0x29, 0x59, 0xd8, 0xf0, - 0x8a, 0x28, 0xc3, 0x0a, 0x8a, 0x6e, 0x41, 0xed, 0xe6, 0xad, 0x84, 
0xdf, 0xfe, 0x08, 0xfb, 0x76, - 0x51, 0x97, 0x3e, 0x4a, 0x69, 0x51, 0xd7, 0x4b, 0x58, 0xd3, 0x42, 0x36, 0xf4, 0x33, 0x21, 0x28, - 0x23, 0x12, 0x98, 0xed, 0x9d, 0x49, 0xc7, 0x18, 0x0b, 0x88, 0xfd, 0xf5, 0x1a, 0x9c, 0xca, 0x4b, - 0x28, 0x8f, 0x3e, 0x02, 0xfd, 0xbc, 0x8f, 0xc5, 0xbc, 0x59, 0x92, 0x47, 0x63, 0x9e, 0x35, 0x28, - 0xba, 0xc5, 0x7e, 0x63, 0x41, 0x53, 0x50, 0xf7, 0x9d, 0x75, 0xb1, 0x42, 0x8e, 0x86, 0xfa, 0xa2, - 0xa3, 0xa9, 0x2f, 0x3a, 0x9c, 0xba, 0xef, 0xac, 0xa3, 0xdb, 0x50, 0x69, 0x78, 0x09, 0x71, 0x84, - 0x11, 0xe1, 0xc6, 0x91, 0x10, 0x27, 0x0e, 0xd7, 0xd2, 0xd8, 0x4f, 0xcc, 0x09, 0xa2, 0xaf, 0x59, - 0x30, 0xba, 0x9e, 0xce, 0x1e, 0x20, 0x98, 0xa7, 0x73, 0x04, 0x8f, 0x06, 0xa4, 0x09, 0xf1, 0x77, - 0xc0, 0x32, 0x85, 0x38, 0xdb, 0x1d, 0xf4, 0x49, 0x0b, 0x06, 0x36, 0x3c, 0xdf, 0xc8, 0xdb, 0x7c, - 0x04, 0x93, 0x73, 0x91, 0x11, 0xd0, 0x27, 0x0e, 0xfe, 0x3f, 0xc6, 0x92, 0x72, 0x37, 0x49, 0xd5, - 0x7f, 0x58, 0x49, 0x35, 0x70, 0x9f, 0x24, 0xd5, 0x67, 0x2c, 0xa8, 0xa9, 0x91, 0x16, 0x51, 0xd8, - 0x1f, 0x3c, 0xc2, 0x29, 0xe7, 0x96, 0x13, 0xf5, 0x17, 0x6b, 0xe2, 0xe8, 0x8b, 0x16, 0x0c, 0x3a, - 0xaf, 0xb5, 0x23, 0x52, 0x27, 0xdb, 0x61, 0x2b, 0x16, 0x8f, 0x88, 0xbe, 0x5c, 0x7c, 0x67, 0xa6, - 0x29, 0x91, 0x39, 0xb2, 0xbd, 0xdc, 0x8a, 0x45, 0xb4, 0x94, 0x2e, 0xc0, 0x66, 0x17, 0xec, 0xdd, - 0x12, 0x4c, 0xec, 0xd3, 0x02, 0x7a, 0x1e, 0x86, 0xc2, 0xa8, 0xe1, 0x04, 0xde, 0x6b, 0x66, 0x3a, - 0x10, 0xa5, 0x65, 0x2d, 0x1b, 0x30, 0x9c, 0xc2, 0x34, 0xe3, 0xc4, 0x4b, 0xfb, 0xc4, 0x89, 0x9f, - 0x83, 0xbe, 0x88, 0xb4, 0xc2, 0xec, 0x61, 0x81, 0x45, 0x2a, 0x30, 0x08, 0x7a, 0x14, 0xca, 0x4e, - 0xcb, 0x13, 0x8e, 0x68, 0xea, 0x0c, 0x34, 0xbd, 0xb2, 0x80, 0x69, 0x79, 0x2a, 0x6d, 0x45, 0xe5, - 0x58, 0xd2, 0x56, 0x50, 0x31, 0x20, 0xee, 0x2e, 0xfa, 0xb5, 0x18, 0x48, 0xdf, 0x29, 0xd8, 0x6f, - 0x94, 0xe1, 0xd1, 0x3d, 0xd7, 0x8b, 0xf6, 0xc3, 0xb3, 0xf6, 0xf0, 0xc3, 0x93, 0xc3, 0x53, 0xda, - 0x6f, 0x78, 0xca, 0x5d, 0x86, 0xe7, 0x93, 0x74, 0x1b, 0xc8, 0x34, 0x2a, 0xc5, 0x3c, 0x03, 0xd9, - 0x2d, 
0x2b, 0x8b, 0xd8, 0x01, 0x12, 0x8a, 0x35, 0x5d, 0x7a, 0x06, 0x48, 0xc5, 0x48, 0x57, 0x8a, - 0x10, 0x03, 0x5d, 0x53, 0x99, 0xf0, 0xb5, 0xdf, 0x2d, 0xf0, 0xda, 0xfe, 0x9d, 0x3e, 0x78, 0xbc, - 0x07, 0xee, 0x6d, 0xae, 0x62, 0xab, 0xc7, 0x55, 0xfc, 0x43, 0x3e, 0x4d, 0x9f, 0xce, 0x9d, 0x26, - 0x5c, 0xfc, 0x34, 0xed, 0x3d, 0x43, 0xe8, 0x29, 0xa8, 0x7a, 0x41, 0x4c, 0xdc, 0x76, 0xc4, 0x7d, - 0x92, 0x8d, 0x30, 0xa6, 0x05, 0x51, 0x8e, 0x15, 0x06, 0x3d, 0xd3, 0xb9, 0x0e, 0xdd, 0xfe, 0x03, - 0x05, 0xc5, 0xee, 0x9a, 0x11, 0x51, 0x5c, 0xa5, 0x98, 0x9d, 0xa6, 0x1c, 0x80, 0x93, 0xb1, 0xff, - 0x86, 0x05, 0x67, 0xbb, 0x8b, 0x58, 0xf4, 0x0c, 0x0c, 0xae, 0x47, 0x4e, 0xe0, 0x6e, 0xb2, 0x07, - 0x80, 0xe5, 0xd2, 0x61, 0xdf, 0xab, 0x8b, 0xb1, 0x89, 0x83, 0x66, 0x61, 0x8c, 0x7b, 0x6e, 0x18, - 0x18, 0x32, 0xf2, 0xf7, 0xce, 0xee, 0xc4, 0xd8, 0x5a, 0x16, 0x88, 0x3b, 0xf1, 0xed, 0x1f, 0x94, - 0xf3, 0xbb, 0xc5, 0x55, 0xb1, 0x83, 0xac, 0x66, 0xb1, 0x56, 0x4b, 0x3d, 0x70, 0xdc, 0xf2, 0x71, - 0x73, 0xdc, 0xbe, 0x6e, 0x1c, 0x17, 0xcd, 0xc1, 0x09, 0xe3, 0x85, 0x26, 0x1e, 0xcd, 0xcd, 0xdd, - 0x92, 0x55, 0x8a, 0x93, 0x95, 0x0c, 0x1c, 0x77, 0xd4, 0x78, 0xc0, 0x97, 0xde, 0xaf, 0x97, 0xe0, - 0x4c, 0x57, 0xed, 0xf7, 0x98, 0x24, 0x8a, 0x39, 0xfd, 0x7d, 0xc7, 0x33, 0xfd, 0xe6, 0xa4, 0x54, - 0xf6, 0x9b, 0x14, 0xfb, 0x4f, 0x4a, 0x5d, 0x37, 0x02, 0x3d, 0x09, 0xfd, 0xc8, 0x8e, 0xd2, 0x0b, - 0x30, 0xec, 0xb4, 0x5a, 0x1c, 0x8f, 0x79, 0xd1, 0x66, 0x52, 0x2a, 0x4d, 0x9b, 0x40, 0x9c, 0xc6, - 0xed, 0x49, 0xa7, 0xf9, 0x53, 0x0b, 0x6a, 0x98, 0x6c, 0x70, 0x6e, 0x84, 0x6e, 0x8a, 0x21, 0xb2, - 0x8a, 0xc8, 0x1f, 0x4b, 0x07, 0x36, 0xf6, 0x58, 0x5e, 0xd5, 0xbc, 0xc1, 0xee, 0x7c, 0xb1, 0xab, - 0x74, 0xa0, 0x17, 0xbb, 0xd4, 0x9b, 0x4d, 0xe5, 0xee, 0x6f, 0x36, 0xd9, 0xdf, 0x1b, 0xa0, 0x9f, - 0xd7, 0x0a, 0x67, 0x23, 0x52, 0x8f, 0xe9, 0xfc, 0xb6, 0x23, 0x5f, 0x2c, 0x12, 0x35, 0xbf, 0xd7, - 0xf0, 0x22, 0xa6, 0xe5, 0xa9, 0x0b, 0xb2, 0xd2, 0x81, 0x12, 0xca, 0x94, 0xf7, 0x4d, 0x28, 0xf3, - 0x02, 0x0c, 0xc7, 0xf1, 0xe6, 0x4a, 0xe4, 
0x6d, 0x3b, 0x09, 0xb9, 0x42, 0x76, 0x84, 0xee, 0xab, - 0x93, 0x40, 0xac, 0x5e, 0xd2, 0x40, 0x9c, 0xc6, 0x45, 0xf3, 0x30, 0xa6, 0xd3, 0xba, 0x90, 0x28, - 0x61, 0x31, 0x17, 0x7c, 0x25, 0xa8, 0x88, 0x6f, 0x9d, 0x08, 0x46, 0x20, 0xe0, 0xce, 0x3a, 0x94, - 0x9f, 0xa6, 0x0a, 0x69, 0x47, 0xfa, 0xd3, 0xfc, 0x34, 0xd5, 0x0e, 0xed, 0x4b, 0x47, 0x0d, 0xb4, - 0x04, 0x27, 0xf9, 0xc2, 0x98, 0x6e, 0xb5, 0x8c, 0x2f, 0x1a, 0x48, 0xe7, 0xed, 0x9c, 0xef, 0x44, - 0xc1, 0x79, 0xf5, 0xd0, 0x73, 0x30, 0xa8, 0x8a, 0x17, 0xe6, 0xc4, 0xdd, 0x8e, 0xb2, 0x2d, 0xa9, - 0x66, 0x16, 0xea, 0xd8, 0xc4, 0x43, 0x1f, 0x80, 0x87, 0xf5, 0x5f, 0x1e, 0x98, 0xc7, 0x2f, 0x3c, - 0xe7, 0x44, 0xc6, 0x2c, 0xf5, 0x42, 0xd0, 0x7c, 0x2e, 0x5a, 0x1d, 0x77, 0xab, 0x8f, 0xd6, 0xe1, - 0xac, 0x02, 0x5d, 0x08, 0x12, 0x16, 0x65, 0x13, 0x93, 0x19, 0x27, 0x26, 0xd7, 0x22, 0x5f, 0xbc, - 0x34, 0xad, 0x1e, 0x91, 0x9d, 0xf7, 0x92, 0x4b, 0x79, 0x98, 0x78, 0x11, 0xef, 0xd1, 0x0a, 0x9a, - 0x82, 0x1a, 0x09, 0x9c, 0x75, 0x9f, 0x2c, 0xcf, 0x2e, 0xb0, 0xcc, 0x5b, 0xc6, 0xfd, 0xea, 0x05, - 0x09, 0xc0, 0x1a, 0x47, 0xf9, 0xfd, 0x0e, 0x75, 0x7d, 0xd0, 0x78, 0x05, 0x4e, 0x35, 0xdc, 0x16, - 0xd5, 0x08, 0x3d, 0x97, 0x4c, 0xbb, 0xcc, 0xcd, 0x91, 0x4e, 0x0c, 0x4f, 0xa8, 0xaa, 0x9c, 0xda, - 0xe7, 0x67, 0x57, 0x3a, 0x70, 0x70, 0x6e, 0x4d, 0xe6, 0x0e, 0x1b, 0x85, 0xb7, 0x77, 0xc6, 0x4f, - 0x66, 0xdc, 0x61, 0x69, 0x21, 0xe6, 0x30, 0x74, 0x19, 0x10, 0x8b, 0x90, 0xb8, 0x94, 0x24, 0x2d, - 0xa5, 0x82, 0x8e, 0x9f, 0x62, 0x9f, 0xa4, 0x9c, 0xfb, 0x2e, 0x76, 0x60, 0xe0, 0x9c, 0x5a, 0x54, - 0xa3, 0x09, 0x42, 0xd6, 0xfa, 0xf8, 0xc3, 0x69, 0x8d, 0xe6, 0x2a, 0x2f, 0xc6, 0x12, 0x6e, 0xff, - 0x47, 0x0b, 0x86, 0xd5, 0xd6, 0x3e, 0x86, 0x70, 0x22, 0x3f, 0x1d, 0x4e, 0x34, 0x7f, 0x78, 0xe6, - 0xc8, 0x7a, 0xde, 0xc5, 0x27, 0xfd, 0x9b, 0x83, 0x00, 0x9a, 0x81, 0x2a, 0xd9, 0x65, 0x75, 0x95, - 0x5d, 0x0f, 0x2c, 0xf3, 0xca, 0xcb, 0xc8, 0x53, 0xb9, 0xbf, 0x19, 0x79, 0x56, 0xe1, 0xb4, 0xd4, - 0x2c, 0xf8, 0x65, 0xdf, 0xa5, 0x30, 0x56, 0xbc, 0xb0, 0x3a, 0xf3, 0xa8, 0x68, 
0xe8, 0xf4, 0x42, - 0x1e, 0x12, 0xce, 0xaf, 0x9b, 0x52, 0x68, 0x06, 0xf6, 0xd5, 0x32, 0xd5, 0xf6, 0x5f, 0xdc, 0x90, - 0x4f, 0xf3, 0x64, 0xb6, 0xff, 0xe2, 0xc5, 0x55, 0xac, 0x71, 0xf2, 0x65, 0x40, 0xad, 0x20, 0x19, - 0x00, 0x07, 0x96, 0x01, 0x92, 0x1b, 0x0d, 0x76, 0xe5, 0x46, 0xf2, 0x52, 0x61, 0xa8, 0xeb, 0xa5, - 0xc2, 0x7b, 0x61, 0xc4, 0x0b, 0x36, 0x49, 0xe4, 0x25, 0xa4, 0xce, 0xf6, 0x02, 0xe3, 0x54, 0x55, - 0xad, 0x01, 0x2c, 0xa4, 0xa0, 0x38, 0x83, 0x9d, 0x66, 0xa1, 0x23, 0x3d, 0xb0, 0xd0, 0x2e, 0x82, - 0x6b, 0xb4, 0x18, 0xc1, 0x75, 0xe2, 0xf0, 0x82, 0x6b, 0xec, 0x48, 0x05, 0x17, 0x2a, 0x44, 0x70, - 0xf5, 0x24, 0x13, 0x8c, 0x93, 0xe9, 0xa9, 0x7d, 0x4e, 0xa6, 0xdd, 0xa4, 0xd6, 0xe9, 0x7b, 0x96, - 0x5a, 0xf9, 0x02, 0xe9, 0xa1, 0xa3, 0x16, 0x48, 0x9f, 0x29, 0xc1, 0x69, 0xcd, 0xb2, 0xe9, 0x46, - 0xf1, 0x36, 0x28, 0xd3, 0x62, 0x0f, 0xc1, 0xf1, 0x3b, 0x3a, 0x23, 0x10, 0x4e, 0xc7, 0xd4, 0x29, - 0x08, 0x36, 0xb0, 0x58, 0x3c, 0x19, 0x89, 0x58, 0x56, 0xe9, 0x2c, 0x3f, 0x9f, 0x15, 0xe5, 0x58, - 0x61, 0xd0, 0xa5, 0x48, 0x7f, 0x8b, 0x18, 0xdd, 0x6c, 0xbe, 0xc2, 0x59, 0x0d, 0xc2, 0x26, 0x1e, - 0x7a, 0x92, 0x13, 0x61, 0xbc, 0x84, 0xf2, 0xf4, 0x21, 0xf1, 0x78, 0xb6, 0x64, 0x1f, 0x0a, 0x2a, - 0xbb, 0xc3, 0x02, 0x07, 0x2b, 0x9d, 0xdd, 0x61, 0xee, 0x6e, 0x0a, 0xc3, 0xfe, 0x1f, 0x16, 0x9c, - 0xc9, 0x1d, 0x8a, 0x63, 0x90, 0xd3, 0xb7, 0xd3, 0x72, 0x7a, 0xb5, 0xa8, 0x43, 0x8c, 0xf1, 0x15, - 0x5d, 0x64, 0xf6, 0xbf, 0xb7, 0x60, 0x44, 0xe3, 0x1f, 0xc3, 0xa7, 0x7a, 0xe9, 0x4f, 0x2d, 0xee, - 0xbc, 0x56, 0xeb, 0xf8, 0xb6, 0xdf, 0x2f, 0x81, 0xca, 0x21, 0x3a, 0xed, 0xca, 0x0c, 0xcd, 0xfb, - 0xdc, 0x1a, 0xef, 0x40, 0x3f, 0xbb, 0xf4, 0x8e, 0x8b, 0x71, 0xe8, 0x49, 0xd3, 0x67, 0x17, 0xe8, - 0xda, 0xa1, 0x80, 0xfd, 0x8d, 0xb1, 0x20, 0xc8, 0x72, 0x9e, 0x7b, 0x31, 0x65, 0xfc, 0x75, 0x11, - 0x82, 0xa7, 0x73, 0x9e, 0x8b, 0x72, 0xac, 0x30, 0xa8, 0x24, 0xf1, 0xdc, 0x30, 0x98, 0xf5, 0x9d, - 0x58, 0x3e, 0xcc, 0xaa, 0x24, 0xc9, 0x82, 0x04, 0x60, 0x8d, 0xc3, 0xee, 0xc3, 0xbd, 0xb8, 0xe5, - 0x3b, 0x3b, 0xc6, 
0xa9, 0xdc, 0xc8, 0x45, 0xa1, 0x40, 0xd8, 0xc4, 0xb3, 0x9b, 0x30, 0x9e, 0xfe, - 0x88, 0x39, 0xb2, 0xc1, 0x9c, 0x51, 0x7b, 0x1a, 0xce, 0x29, 0xa8, 0x39, 0xac, 0xd6, 0x62, 0xdb, - 0x11, 0x3c, 0x41, 0xbb, 0x64, 0x4a, 0x00, 0xd6, 0x38, 0xf6, 0x3f, 0xb0, 0xe0, 0x64, 0xce, 0xa0, - 0x15, 0x18, 0xe2, 0x98, 0x68, 0x6e, 0x93, 0xa7, 0x03, 0xbc, 0x1d, 0x06, 0xea, 0x64, 0xc3, 0x91, - 0xee, 0x8e, 0x06, 0xf7, 0x9c, 0xe3, 0xc5, 0x58, 0xc2, 0xed, 0xdf, 0x2e, 0xc1, 0x68, 0xba, 0xaf, - 0x31, 0x0b, 0x1b, 0xe2, 0xc3, 0xe4, 0xc5, 0x6e, 0xb8, 0x4d, 0xa2, 0x1d, 0xfa, 0xe5, 0x56, 0x26, - 0x6c, 0xa8, 0x03, 0x03, 0xe7, 0xd4, 0x62, 0x19, 0x84, 0xeb, 0x6a, 0xb4, 0xe5, 0x8a, 0xbc, 0x5e, - 0xe4, 0x8a, 0xd4, 0x93, 0x69, 0xba, 0x46, 0x28, 0x92, 0xd8, 0xa4, 0x4f, 0x75, 0x11, 0xe6, 0x87, - 0x3d, 0xd3, 0xf6, 0xfc, 0xc4, 0x0b, 0xc4, 0x27, 0x8b, 0xb5, 0xaa, 0x74, 0x91, 0xa5, 0x4e, 0x14, - 0x9c, 0x57, 0xcf, 0xfe, 0x7e, 0x1f, 0xa8, 0x90, 0x6a, 0xe6, 0xba, 0x56, 0x90, 0xe3, 0xdf, 0x41, - 0x83, 0xcf, 0xd4, 0xda, 0xea, 0xdb, 0xcb, 0x97, 0x84, 0x9b, 0x72, 0x4c, 0x7b, 0xae, 0x1a, 0xb0, - 0x35, 0x0d, 0xc2, 0x26, 0x1e, 0xed, 0x89, 0xef, 0x6d, 0x13, 0x5e, 0xa9, 0x3f, 0xdd, 0x93, 0x45, - 0x09, 0xc0, 0x1a, 0x87, 0xf6, 0xa4, 0xee, 0x6d, 0x6c, 0x08, 0xbb, 0x84, 0xea, 0x09, 0x1d, 0x1d, - 0xcc, 0x20, 0x3c, 0xc7, 0x7c, 0xb8, 0x25, 0xf4, 0x6f, 0x23, 0xc7, 0x7c, 0xb8, 0x85, 0x19, 0x84, - 0xce, 0x52, 0x10, 0x46, 0x4d, 0xc7, 0xf7, 0x5e, 0x23, 0x75, 0x45, 0x45, 0xe8, 0xdd, 0x6a, 0x96, - 0xae, 0x76, 0xa2, 0xe0, 0xbc, 0x7a, 0x74, 0x41, 0xb7, 0x22, 0x52, 0xf7, 0xdc, 0xc4, 0x6c, 0x0d, - 0xd2, 0x0b, 0x7a, 0xa5, 0x03, 0x03, 0xe7, 0xd4, 0x42, 0xd3, 0x30, 0x2a, 0x43, 0xe2, 0x65, 0xc2, - 0xa3, 0xc1, 0x74, 0x82, 0x15, 0x9c, 0x06, 0xe3, 0x2c, 0x3e, 0x65, 0x92, 0x4d, 0x91, 0x13, 0x8d, - 0xa9, 0xe9, 0x06, 0x93, 0x94, 0xb9, 0xd2, 0xb0, 0xc2, 0xb0, 0x3f, 0x51, 0xa6, 0x42, 0xbd, 0x4b, - 0xea, 0xc1, 0x63, 0x73, 0x34, 0x4d, 0xaf, 0xc8, 0xbe, 0x1e, 0x56, 0xe4, 0xb3, 0x30, 0x74, 0x33, - 0x0e, 0x03, 0xe5, 0xc4, 0x59, 0xe9, 0xea, 0xc4, 0x69, 
0x60, 0xe5, 0x3b, 0x71, 0xf6, 0x17, 0xe5, - 0xc4, 0x39, 0x70, 0x8f, 0x4e, 0x9c, 0x7f, 0x58, 0x01, 0xf5, 0x5e, 0xcf, 0x55, 0x92, 0xdc, 0x0a, - 0xa3, 0x2d, 0x2f, 0x68, 0xb0, 0x54, 0x02, 0x5f, 0xb3, 0x60, 0x88, 0xef, 0x97, 0x45, 0x33, 0x08, - 0x6f, 0xa3, 0xa0, 0x87, 0x60, 0x52, 0xc4, 0x26, 0xd7, 0x0c, 0x42, 0x99, 0xb7, 0x7c, 0x4d, 0x10, - 0x4e, 0xf5, 0x08, 0x7d, 0x14, 0x40, 0x1a, 0x71, 0x37, 0x24, 0x07, 0x5e, 0x28, 0xa6, 0x7f, 0x98, - 0x6c, 0x68, 0x95, 0x7a, 0x4d, 0x11, 0xc1, 0x06, 0x41, 0xf4, 0x19, 0x1d, 0xa0, 0xc8, 0xa3, 0x3d, - 0x3e, 0x7c, 0x24, 0x63, 0xd3, 0x4b, 0x78, 0x22, 0x86, 0x01, 0x2f, 0x68, 0xd0, 0x75, 0x22, 0x9c, - 0xdd, 0xde, 0x96, 0x97, 0x86, 0x63, 0x31, 0x74, 0xea, 0x33, 0x8e, 0xef, 0x04, 0x2e, 0x89, 0x16, - 0x38, 0xba, 0xf9, 0xb8, 0x3e, 0x2b, 0xc0, 0xb2, 0xa1, 0x8e, 0x97, 0x8e, 0x2a, 0xbd, 0xbc, 0x74, - 0x74, 0xf6, 0x7d, 0x30, 0xd6, 0x31, 0x99, 0x07, 0x8a, 0x46, 0xbc, 0xf7, 0x40, 0x46, 0xfb, 0x77, - 0xfa, 0xb5, 0xd0, 0xba, 0x1a, 0xd6, 0xf9, 0xc3, 0x39, 0x91, 0x9e, 0x51, 0xa1, 0x32, 0x17, 0xb8, - 0x44, 0x8c, 0x07, 0xfa, 0x55, 0x21, 0x36, 0x49, 0xd2, 0x35, 0xda, 0x72, 0x22, 0x12, 0x1c, 0xf5, - 0x1a, 0x5d, 0x51, 0x44, 0xb0, 0x41, 0x10, 0x6d, 0xa6, 0xc2, 0x91, 0x2e, 0x1e, 0x3e, 0x1c, 0x89, - 0x25, 0x28, 0xcb, 0x7b, 0x5f, 0xe2, 0x8b, 0x16, 0x8c, 0x04, 0xa9, 0x95, 0x5b, 0x8c, 0x07, 0x72, - 0xfe, 0xae, 0xe0, 0xcf, 0xbd, 0xa5, 0xcb, 0x70, 0x86, 0x7e, 0x9e, 0x48, 0xab, 0x1c, 0x50, 0xa4, - 0xe9, 0x87, 0xbb, 0xfa, 0xbb, 0x3d, 0xdc, 0x85, 0x02, 0xf5, 0x72, 0xe1, 0x40, 0xe1, 0x2f, 0x17, - 0x42, 0xce, 0xab, 0x85, 0x37, 0xa0, 0xe6, 0x46, 0xc4, 0x49, 0xee, 0xf1, 0x11, 0x3b, 0xe6, 0xdb, - 0x31, 0x2b, 0x1b, 0xc0, 0xba, 0x2d, 0xfb, 0x7f, 0xf7, 0xc1, 0x09, 0x39, 0x22, 0x32, 0x7a, 0x81, - 0xca, 0x47, 0x4e, 0x57, 0xeb, 0xca, 0x4a, 0x3e, 0x5e, 0x92, 0x00, 0xac, 0x71, 0xa8, 0x3e, 0xd6, - 0x8e, 0xc9, 0x72, 0x8b, 0x04, 0x8b, 0xde, 0x7a, 0x2c, 0x2e, 0x63, 0xd5, 0x46, 0xb9, 0xa6, 0x41, - 0xd8, 0xc4, 0xa3, 0xba, 0xbd, 0x63, 0x28, 0xad, 0x86, 0x6e, 0x2f, 0x15, 0x55, 0x09, 0x47, 
0xbf, - 0x92, 0x9b, 0x0b, 0xb9, 0x98, 0x98, 0xbf, 0x8e, 0xa0, 0x8d, 0x03, 0xbe, 0x7b, 0xfa, 0x77, 0x2d, - 0x38, 0xcd, 0x4b, 0xe5, 0x48, 0x5e, 0x6b, 0xd5, 0x9d, 0x84, 0xc4, 0xc5, 0xbc, 0x4d, 0x90, 0xd3, - 0x3f, 0x6d, 0x5e, 0xce, 0x23, 0x8b, 0xf3, 0x7b, 0x83, 0x5e, 0xb7, 0x60, 0x74, 0x2b, 0x95, 0x2e, - 0x46, 0x8a, 0x8e, 0xc3, 0x66, 0x72, 0x48, 0x35, 0xaa, 0xb7, 0x5a, 0xba, 0x3c, 0xc6, 0x59, 0xea, - 0xf6, 0x7f, 0xb7, 0xc0, 0x64, 0xa3, 0xc7, 0x9f, 0x65, 0xe6, 0xe0, 0xaa, 0xa0, 0xd4, 0x2e, 0x2b, - 0x5d, 0xb5, 0xcb, 0x47, 0xa1, 0xdc, 0xf6, 0xea, 0xe2, 0x7c, 0xa1, 0xaf, 0x88, 0x17, 0xe6, 0x30, - 0x2d, 0xb7, 0xff, 0x79, 0x45, 0x9b, 0x41, 0x44, 0x48, 0xdd, 0x8f, 0xc4, 0x67, 0x6f, 0xa8, 0x3c, - 0x75, 0xfc, 0xcb, 0xaf, 0x76, 0xe4, 0xa9, 0xfb, 0xe9, 0x83, 0x47, 0x4c, 0xf2, 0x01, 0xea, 0x96, - 0xa6, 0x6e, 0x60, 0x9f, 0x70, 0xc9, 0x9b, 0x50, 0xa5, 0x47, 0x30, 0x66, 0xcf, 0xac, 0xa6, 0x3a, - 0x55, 0xbd, 0x24, 0xca, 0xef, 0xee, 0x4e, 0xbc, 0xfb, 0xe0, 0xdd, 0x92, 0xb5, 0xb1, 0x6a, 0x1f, - 0xc5, 0x50, 0xa3, 0xbf, 0x59, 0x64, 0xa7, 0x38, 0xdc, 0x5d, 0x53, 0x3c, 0x53, 0x02, 0x0a, 0x09, - 0x1b, 0xd5, 0x74, 0x50, 0x00, 0x35, 0xf6, 0x44, 0x34, 0x23, 0xca, 0xcf, 0x80, 0x2b, 0x2a, 0xbe, - 0x52, 0x02, 0xee, 0xee, 0x4e, 0xbc, 0x70, 0x70, 0xa2, 0xaa, 0x3a, 0xd6, 0x24, 0xec, 0x2f, 0xf5, - 0xe9, 0xb5, 0x2b, 0xd2, 0x13, 0xfe, 0x48, 0xac, 0xdd, 0xe7, 0x33, 0x6b, 0xf7, 0x5c, 0xc7, 0xda, - 0x1d, 0xd1, 0x4f, 0x19, 0xa7, 0x56, 0xe3, 0x71, 0x2b, 0x02, 0xfb, 0xdb, 0x1b, 0x98, 0x06, 0xf4, - 0x6a, 0xdb, 0x8b, 0x48, 0xbc, 0x12, 0xb5, 0x03, 0x2f, 0x68, 0xb0, 0xe5, 0x58, 0x35, 0x35, 0xa0, - 0x14, 0x18, 0x67, 0xf1, 0xe9, 0xa1, 0x9e, 0xce, 0xf9, 0x0d, 0x67, 0x9b, 0xaf, 0x2a, 0x23, 0x63, - 0xdb, 0xaa, 0x28, 0xc7, 0x0a, 0xc3, 0xfe, 0x06, 0xbb, 0x45, 0x37, 0x42, 0xca, 0xe9, 0x9a, 0xf0, - 0xd9, 0x9b, 0xdc, 0x3c, 0xdd, 0x9b, 0x5a, 0x13, 0xfc, 0x21, 0x6e, 0x0e, 0x43, 0xb7, 0x60, 0x60, - 0x9d, 0xbf, 0x2e, 0x59, 0x4c, 0xc6, 0x7d, 0xf1, 0x54, 0x25, 0x7b, 0xb7, 0x47, 0xbe, 0x5b, 0x79, - 0x57, 0xff, 0xc4, 0x92, 0x9a, 
0xfd, 0xed, 0x0a, 0x8c, 0x66, 0x5e, 0x6d, 0x4e, 0x25, 0xda, 0x2d, - 0xed, 0x9b, 0x68, 0xf7, 0x43, 0x00, 0x75, 0xd2, 0xf2, 0xc3, 0x1d, 0xa6, 0x8e, 0xf5, 0x1d, 0x58, - 0x1d, 0x53, 0x1a, 0xfc, 0x9c, 0x6a, 0x05, 0x1b, 0x2d, 0x8a, 0x1c, 0x77, 0x3c, 0x6f, 0x6f, 0x26, - 0xc7, 0x9d, 0xf1, 0x2e, 0x47, 0xff, 0xf1, 0xbe, 0xcb, 0xe1, 0xc1, 0x28, 0xef, 0xa2, 0x0a, 0xdc, - 0xbe, 0x87, 0xf8, 0x6c, 0x16, 0xfa, 0x32, 0x97, 0x6e, 0x06, 0x67, 0xdb, 0xbd, 0x9f, 0x8f, 0xb2, - 0xa3, 0x77, 0x40, 0x4d, 0xce, 0x73, 0x3c, 0x5e, 0xd3, 0xc9, 0x2f, 0xe4, 0x32, 0x60, 0x8f, 0xa5, - 0x8b, 0x9f, 0x1d, 0x39, 0x28, 0xe0, 0x7e, 0xe5, 0xa0, 0xb0, 0xbf, 0x50, 0xa2, 0x7a, 0x3c, 0xef, - 0x97, 0x4a, 0xa7, 0xf4, 0x04, 0xf4, 0x3b, 0xed, 0x64, 0x33, 0xec, 0x78, 0x9f, 0x72, 0x9a, 0x95, - 0x62, 0x01, 0x45, 0x8b, 0xd0, 0x57, 0xd7, 0x29, 0x72, 0x0e, 0x32, 0x9f, 0xda, 0x24, 0xea, 0x24, - 0x04, 0xb3, 0x56, 0xd0, 0x23, 0xd0, 0x97, 0x38, 0x0d, 0x19, 0xad, 0xc7, 0x22, 0xb4, 0xd7, 0x9c, - 0x46, 0x8c, 0x59, 0xa9, 0x29, 0xbe, 0xfb, 0xf6, 0x11, 0xdf, 0x2f, 0xc0, 0x70, 0xec, 0x35, 0x02, - 0x27, 0x69, 0x47, 0xc4, 0xb8, 0x35, 0xd4, 0x3e, 0x23, 0x26, 0x10, 0xa7, 0x71, 0xed, 0xdf, 0x1d, - 0x82, 0x53, 0xab, 0xb3, 0x4b, 0x32, 0xf1, 0xfb, 0x91, 0x05, 0xdc, 0xe5, 0xd1, 0x38, 0xbe, 0x80, - 0xbb, 0x2e, 0xd4, 0x7d, 0x23, 0xe0, 0xce, 0x37, 0x02, 0xee, 0xd2, 0xd1, 0x4f, 0xe5, 0x22, 0xa2, - 0x9f, 0xf2, 0x7a, 0xd0, 0x4b, 0xf4, 0xd3, 0x91, 0x45, 0xe0, 0xed, 0xd9, 0xa1, 0x03, 0x45, 0xe0, - 0xa9, 0xf0, 0xc4, 0x42, 0xe2, 0x52, 0xba, 0x4c, 0x55, 0x6e, 0x78, 0xa2, 0x0a, 0x0d, 0xe3, 0x31, - 0x57, 0x82, 0xd5, 0xbf, 0x5c, 0x7c, 0x07, 0x7a, 0x08, 0x0d, 0x13, 0x61, 0x5f, 0x66, 0x38, 0xe2, - 0x40, 0x11, 0xe1, 0x88, 0x79, 0xdd, 0xd9, 0x37, 0x1c, 0xf1, 0x05, 0x18, 0x76, 0xfd, 0x30, 0x20, - 0x2b, 0x51, 0x98, 0x84, 0x6e, 0xe8, 0x0b, 0xb5, 0x5e, 0x3f, 0x44, 0x63, 0x02, 0x71, 0x1a, 0xb7, - 0x5b, 0x2c, 0x63, 0xed, 0xb0, 0xb1, 0x8c, 0x70, 0x9f, 0x62, 0x19, 0x7f, 0x51, 0x47, 0xdd, 0x0f, - 0xb2, 0x19, 0xf9, 0x50, 0xf1, 0x33, 0xd2, 0x4b, 0xe8, 0x3d, 0x7a, 
0x83, 0x3f, 0x10, 0x49, 0x15, - 0xe3, 0xd9, 0xb0, 0x49, 0x15, 0xbf, 0x21, 0x36, 0x24, 0xaf, 0x1c, 0xc1, 0x82, 0xbd, 0xb1, 0xaa, - 0xc9, 0xa8, 0x47, 0x23, 0x75, 0x11, 0x4e, 0x77, 0xe4, 0x30, 0x59, 0x01, 0xbe, 0x52, 0x82, 0x1f, - 0xdb, 0xb7, 0x0b, 0xe8, 0x16, 0x40, 0xe2, 0x34, 0xc4, 0x42, 0x15, 0x17, 0x26, 0x87, 0x74, 0xec, - 0x5c, 0x93, 0xed, 0xf1, 0x74, 0x36, 0xea, 0x2f, 0xbb, 0x8a, 0x90, 0xbf, 0x99, 0x3f, 0x67, 0xe8, - 0x77, 0x64, 0xfd, 0xc4, 0xa1, 0x4f, 0x30, 0x83, 0x50, 0xf1, 0x1f, 0x91, 0x86, 0x7e, 0x5d, 0x5d, - 0x4d, 0x1f, 0x66, 0xa5, 0x58, 0x40, 0xd1, 0x73, 0x30, 0xe8, 0xf8, 0x3e, 0x0f, 0x1a, 0x22, 0xb1, - 0x78, 0x21, 0x4a, 0xa7, 0x1f, 0xd4, 0x20, 0x6c, 0xe2, 0xd9, 0x7f, 0x59, 0x82, 0x89, 0x7d, 0x78, - 0x4a, 0x47, 0xb0, 0x68, 0xa5, 0xe7, 0x60, 0x51, 0x11, 0x48, 0xd1, 0xdf, 0x25, 0x90, 0xe2, 0x39, - 0x18, 0x4c, 0x88, 0xd3, 0x14, 0xae, 0x60, 0xc2, 0x12, 0xa0, 0x6f, 0x80, 0x35, 0x08, 0x9b, 0x78, - 0x94, 0x8b, 0x8d, 0x38, 0xae, 0x4b, 0xe2, 0x58, 0x46, 0x4a, 0x08, 0x6b, 0x6a, 0x61, 0x61, 0x18, - 0xcc, 0x48, 0x3d, 0x9d, 0x22, 0x81, 0x33, 0x24, 0xb3, 0x03, 0x5e, 0xeb, 0x71, 0xc0, 0xbf, 0x5e, - 0x82, 0x47, 0xf7, 0x94, 0x6e, 0x3d, 0x07, 0xb1, 0xb4, 0x63, 0x12, 0x65, 0x17, 0xce, 0xb5, 0x98, - 0x44, 0x98, 0x41, 0xf8, 0x28, 0xb5, 0x5a, 0xc6, 0xeb, 0xf5, 0x45, 0x47, 0x74, 0xf1, 0x51, 0x4a, - 0x91, 0xc0, 0x19, 0x92, 0xf7, 0xba, 0x2c, 0xbf, 0xdd, 0x07, 0x8f, 0xf7, 0xa0, 0x03, 0x14, 0x18, - 0xf9, 0x96, 0x8e, 0xd2, 0x2c, 0xdf, 0xa7, 0x28, 0xcd, 0x7b, 0x1b, 0xae, 0x37, 0x83, 0x3b, 0x7b, - 0x8a, 0xb0, 0xfb, 0x46, 0x09, 0xce, 0x76, 0x57, 0x58, 0xd0, 0x7b, 0x60, 0x34, 0x52, 0xae, 0x6f, - 0x66, 0x80, 0xe7, 0x49, 0x6e, 0x6f, 0x49, 0x81, 0x70, 0x16, 0x17, 0x4d, 0x02, 0xb4, 0x9c, 0x64, - 0x33, 0xbe, 0x70, 0xdb, 0x8b, 0x13, 0x91, 0xe6, 0x69, 0x84, 0xdf, 0xf0, 0xc9, 0x52, 0x6c, 0x60, - 0x50, 0x72, 0xec, 0xdf, 0x5c, 0x78, 0x35, 0x4c, 0x78, 0x25, 0x7e, 0xd8, 0x3a, 0x29, 0x1f, 0xc5, - 0x31, 0x40, 0x38, 0x8b, 0x4b, 0xc9, 0xb1, 0x3b, 0x64, 0xde, 0x51, 0x7e, 0x0a, 0x63, 0xe4, 0x16, - 0x55, 
0x29, 0x36, 0x30, 0xb2, 0xa1, 0xab, 0x95, 0xfd, 0x43, 0x57, 0xed, 0x7f, 0x56, 0x82, 0x33, - 0x5d, 0x15, 0xde, 0xde, 0xd8, 0xd4, 0x83, 0x17, 0x6e, 0x7a, 0x8f, 0x3b, 0xec, 0x60, 0x61, 0x8a, - 0x7f, 0xda, 0x65, 0xa5, 0x89, 0x30, 0xc5, 0x7b, 0xcf, 0xbe, 0xf0, 0xe0, 0x8d, 0x67, 0x47, 0x64, - 0x62, 0xdf, 0x01, 0x22, 0x13, 0x33, 0x93, 0x51, 0xe9, 0x51, 0x3a, 0xfc, 0x79, 0x5f, 0xd7, 0xe1, - 0xa5, 0x07, 0xe4, 0x9e, 0xac, 0xd9, 0x73, 0x70, 0xc2, 0x0b, 0xd8, 0x03, 0x69, 0xab, 0xed, 0x75, - 0x91, 0xf9, 0x87, 0xa7, 0xb7, 0x54, 0xe1, 0x0f, 0x0b, 0x19, 0x38, 0xee, 0xa8, 0xf1, 0x00, 0x46, - 0x8a, 0xde, 0xdb, 0x90, 0x1e, 0x90, 0x73, 0x2f, 0xc3, 0x69, 0x39, 0x14, 0x9b, 0x4e, 0x44, 0xea, - 0x42, 0xd8, 0xc6, 0x22, 0xe0, 0xe5, 0x0c, 0x0f, 0x9a, 0xc9, 0x41, 0xc0, 0xf9, 0xf5, 0xd8, 0x9b, - 0x54, 0x61, 0xcb, 0x73, 0xc5, 0x51, 0x50, 0xbf, 0x49, 0x45, 0x0b, 0x31, 0x87, 0x69, 0x79, 0x51, - 0x3b, 0x1e, 0x79, 0xf1, 0x21, 0xa8, 0xa9, 0xf1, 0xe6, 0xbe, 0xfb, 0x6a, 0x91, 0x77, 0xf8, 0xee, - 0xab, 0x15, 0x6e, 0x60, 0xed, 0xf7, 0x68, 0xea, 0x3b, 0x61, 0x48, 0x59, 0xbf, 0x7a, 0x7d, 0x19, - 0xcc, 0xfe, 0x8b, 0x7e, 0x18, 0x4e, 0x65, 0xfb, 0x4c, 0x99, 0xbd, 0xad, 0x7d, 0xcd, 0xde, 0x2c, - 0x6c, 0xa3, 0x1d, 0xc8, 0x67, 0x03, 0x8d, 0xb0, 0x8d, 0x76, 0x40, 0x30, 0x87, 0xd1, 0x43, 0x47, - 0x3d, 0xda, 0xc1, 0xed, 0x40, 0xf8, 0xa1, 0xaa, 0x43, 0xc7, 0x1c, 0x2b, 0xc5, 0x02, 0x8a, 0x3e, - 0x6e, 0xc1, 0x50, 0xcc, 0xee, 0x54, 0xf8, 0xa5, 0x81, 0x58, 0xe4, 0x97, 0x0f, 0x9f, 0xcc, 0x54, - 0x65, 0xb6, 0x65, 0x7e, 0x4b, 0x66, 0x09, 0x4e, 0x51, 0x44, 0x9f, 0xb2, 0xa0, 0xa6, 0x5e, 0x37, - 0x12, 0x6f, 0x80, 0xae, 0x16, 0x9b, 0x4c, 0x95, 0x5b, 0x9b, 0xd5, 0xf5, 0x94, 0xca, 0x6a, 0x89, - 0x35, 0x61, 0x14, 0x2b, 0x8b, 0xfe, 0xc0, 0xd1, 0x58, 0xf4, 0x21, 0xc7, 0x9a, 0xff, 0x0e, 0xa8, - 0x35, 0x9d, 0xc0, 0xdb, 0x20, 0x71, 0xc2, 0x8d, 0xec, 0x32, 0xc7, 0xb3, 0x2c, 0xc4, 0x1a, 0x4e, - 0x15, 0x80, 0x98, 0x7d, 0x58, 0x62, 0x58, 0xc5, 0x99, 0x02, 0xb0, 0xaa, 0x8b, 0xb1, 0x89, 0x63, - 0x9a, 0xf0, 0xe1, 0xbe, 0x9a, 0xf0, 0x07, 
0xf7, 0x31, 0xe1, 0xaf, 0xc2, 0x69, 0xa7, 0x9d, 0x84, - 0x97, 0x88, 0xe3, 0x4f, 0xf3, 0x07, 0x7d, 0xc5, 0x03, 0xf5, 0x43, 0xcc, 0x2c, 0xa4, 0x3c, 0x2d, - 0x56, 0x89, 0xbf, 0xd1, 0x81, 0x84, 0xf3, 0xeb, 0xda, 0xff, 0xd8, 0x82, 0xd3, 0xb9, 0x4b, 0xe1, - 0xc1, 0xf5, 0x71, 0xb5, 0xbf, 0x5c, 0x81, 0x93, 0x39, 0xb9, 0x80, 0xd1, 0x8e, 0xb9, 0x49, 0xac, - 0x22, 0xdc, 0x45, 0xd2, 0xde, 0x0f, 0x72, 0x6e, 0x72, 0x76, 0xc6, 0xc1, 0x6e, 0xe5, 0xf4, 0xcd, - 0x58, 0xf9, 0x78, 0x6f, 0xc6, 0x8c, 0xb5, 0xde, 0x77, 0x5f, 0xd7, 0x7a, 0x65, 0x9f, 0xb5, 0xfe, - 0x4d, 0x0b, 0xc6, 0x9b, 0x5d, 0x1e, 0xa0, 0x10, 0x36, 0xe6, 0xeb, 0x47, 0xf3, 0xbc, 0xc5, 0xcc, - 0x23, 0x77, 0x76, 0x27, 0xba, 0xbe, 0xfb, 0x81, 0xbb, 0xf6, 0xca, 0xfe, 0x7e, 0x19, 0x58, 0x22, - 0x6a, 0x96, 0xef, 0x71, 0x07, 0x7d, 0xcc, 0x4c, 0x29, 0x6e, 0x15, 0x95, 0xfe, 0x9a, 0x37, 0xae, - 0x52, 0x92, 0xf3, 0x11, 0xcc, 0xcb, 0x50, 0x9e, 0xe5, 0x84, 0xa5, 0x1e, 0x38, 0xa1, 0x2f, 0x73, - 0xb7, 0x97, 0x8b, 0xcf, 0xdd, 0x5e, 0xcb, 0xe6, 0x6d, 0xdf, 0x7b, 0x8a, 0xfb, 0x1e, 0xc8, 0x29, - 0xfe, 0x55, 0x8b, 0x33, 0x9e, 0xcc, 0x2c, 0x68, 0x75, 0xc3, 0xda, 0x43, 0xdd, 0x78, 0x0a, 0xaa, - 0xb1, 0xe0, 0xcc, 0x42, 0x2d, 0xd1, 0xae, 0x0a, 0xa2, 0x1c, 0x2b, 0x0c, 0xf6, 0xb8, 0xb3, 0xef, - 0x87, 0xb7, 0x2e, 0x34, 0x5b, 0xc9, 0x8e, 0x50, 0x50, 0xf4, 0xe3, 0xce, 0x0a, 0x82, 0x0d, 0x2c, - 0xfb, 0xef, 0x94, 0xf8, 0x0a, 0x14, 0xfe, 0x2e, 0xcf, 0x67, 0x9e, 0xe3, 0xec, 0xdd, 0x55, 0xe4, - 0x23, 0x00, 0x6e, 0xd8, 0x6c, 0x51, 0xe5, 0x75, 0x2d, 0x14, 0xd7, 0x7f, 0x97, 0x0e, 0xfd, 0xf8, - 0xbf, 0x68, 0x4f, 0x7f, 0x86, 0x2e, 0xc3, 0x06, 0xbd, 0x14, 0x2f, 0x2d, 0xef, 0xcb, 0x4b, 0x53, - 0x6c, 0xa5, 0x6f, 0x6f, 0xb6, 0x62, 0xff, 0xa5, 0x05, 0x29, 0x35, 0x0b, 0xb5, 0xa0, 0x42, 0xbb, - 0xbb, 0x23, 0x76, 0xe8, 0x72, 0x71, 0x3a, 0x1d, 0x65, 0x8d, 0x62, 0xd9, 0xb3, 0x9f, 0x98, 0x13, - 0x42, 0xbe, 0x70, 0x8b, 0xe1, 0xa3, 0x7a, 0xb5, 0x38, 0x82, 0x97, 0xc2, 0x70, 0x8b, 0xdf, 0x61, - 0x6b, 0x17, 0x1b, 0xfb, 0x79, 0x18, 0xeb, 0xe8, 0x14, 0x7b, 0x79, 0x2f, 0xa4, 
0xd2, 0x27, 0xb3, - 0x5c, 0x59, 0x94, 0x30, 0xe6, 0x30, 0xfb, 0x1b, 0x16, 0x9c, 0xc8, 0x36, 0x8f, 0xde, 0xb0, 0x60, - 0x2c, 0xce, 0xb6, 0x77, 0x54, 0x63, 0xa7, 0x5c, 0x5b, 0x3b, 0x40, 0xb8, 0xb3, 0x13, 0xf6, 0xff, - 0x11, 0x8b, 0xff, 0x86, 0x17, 0xd4, 0xc3, 0x5b, 0x4a, 0x31, 0xb1, 0xba, 0x2a, 0x26, 0x74, 0x3f, - 0xba, 0x9b, 0xa4, 0xde, 0xf6, 0x3b, 0x62, 0x8e, 0x57, 0x45, 0x39, 0x56, 0x18, 0x2c, 0xc4, 0xb2, - 0x2d, 0x1e, 0x77, 0xc8, 0x2c, 0xca, 0x39, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x16, 0x86, 0x8c, 0x8f, - 0x94, 0xeb, 0x92, 0x69, 0xf9, 0x86, 0xc8, 0x8c, 0x71, 0x0a, 0x0b, 0x4d, 0x02, 0x28, 0x25, 0x47, - 0x8a, 0x48, 0x66, 0xed, 0x52, 0x9c, 0x28, 0xc6, 0x06, 0x06, 0x0b, 0x68, 0xf6, 0xdb, 0x31, 0xbb, - 0xce, 0xe9, 0xd7, 0x09, 0x87, 0x67, 0x45, 0x19, 0x56, 0x50, 0xca, 0x4d, 0x9a, 0x4e, 0xd0, 0x76, - 0x7c, 0x3a, 0x42, 0xe2, 0xfc, 0xaa, 0xb6, 0xe1, 0x92, 0x82, 0x60, 0x03, 0x8b, 0x7e, 0x71, 0xe2, - 0x35, 0xc9, 0x4b, 0x61, 0x20, 0x5d, 0x12, 0xf5, 0x0d, 0x9f, 0x28, 0xc7, 0x0a, 0xc3, 0xfe, 0xaf, - 0x16, 0x8c, 0xea, 0x4c, 0x0a, 0xfc, 0x8d, 0x7d, 0xf3, 0xb8, 0x6d, 0xed, 0x7b, 0xdc, 0x4e, 0xc7, - 0x8d, 0x97, 0x7a, 0x8a, 0x1b, 0x37, 0x43, 0xba, 0xcb, 0x7b, 0x86, 0x74, 0xff, 0x84, 0x7e, 0xbf, - 0x99, 0xc7, 0x7e, 0x0f, 0xe6, 0xbd, 0xdd, 0x8c, 0x6c, 0xe8, 0x77, 0x1d, 0x95, 0x71, 0x68, 0x88, - 0x1f, 0x48, 0x66, 0xa7, 0x19, 0x92, 0x80, 0xd8, 0xcb, 0x50, 0x53, 0x17, 0x5d, 0xf2, 0xf4, 0x6b, - 0xe5, 0x9f, 0x7e, 0x7b, 0x0a, 0x2d, 0x9d, 0x59, 0xff, 0xd6, 0x0f, 0x1e, 0x7b, 0xcb, 0x1f, 0xff, - 0xe0, 0xb1, 0xb7, 0x7c, 0xef, 0x07, 0x8f, 0xbd, 0xe5, 0xe3, 0x77, 0x1e, 0xb3, 0xbe, 0x75, 0xe7, - 0x31, 0xeb, 0x8f, 0xef, 0x3c, 0x66, 0x7d, 0xef, 0xce, 0x63, 0xd6, 0xf7, 0xef, 0x3c, 0x66, 0x7d, - 0xf1, 0x3f, 0x3f, 0xf6, 0x96, 0x97, 0x72, 0x7d, 0x52, 0xe9, 0x8f, 0xa7, 0xdd, 0xfa, 0xd4, 0xf6, - 0x79, 0xe6, 0x16, 0x49, 0xb7, 0xd7, 0x94, 0xb1, 0xa6, 0xa6, 0xe4, 0xf6, 0xfa, 0xbf, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x87, 0xd7, 0xd9, 0x4e, 0x18, 0xeb, 0x00, 0x00, + // 11861 bytes of a gzipped FileDescriptorProto + 
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7d, 0x70, 0x1c, 0xc9, + 0x75, 0x18, 0xae, 0xd9, 0xc5, 0x02, 0xbb, 0x0f, 0x5f, 0x64, 0x93, 0xbc, 0xc3, 0xf1, 0x3e, 0x40, + 0xcf, 0xc9, 0xa7, 0xf3, 0x4f, 0x77, 0x80, 0x8f, 0xbe, 0x93, 0xef, 0xe7, 0xb3, 0x24, 0xe3, 0x83, + 0x04, 0x41, 0x02, 0x04, 0xae, 0x17, 0x24, 0xa5, 0x93, 0x4f, 0xa7, 0xc1, 0x6e, 0x63, 0x31, 0xc4, + 0xec, 0xcc, 0xde, 0xcc, 0x2c, 0x88, 0x3d, 0x4b, 0xb2, 0x64, 0x49, 0xb6, 0x6c, 0x7d, 0x46, 0x4a, + 0xc5, 0xe7, 0xc4, 0x52, 0x64, 0x4b, 0x49, 0x25, 0x95, 0x52, 0x59, 0x89, 0xff, 0x88, 0x53, 0xb6, + 0xcb, 0x15, 0x3b, 0xe5, 0x52, 0xe2, 0xa4, 0xec, 0xa8, 0x54, 0x96, 0x12, 0xdb, 0x88, 0xc4, 0x38, + 0x65, 0x57, 0xfe, 0x70, 0x55, 0x9c, 0xfc, 0x91, 0x62, 0x5c, 0xa9, 0x54, 0x7f, 0xf7, 0xcc, 0xce, + 0x02, 0x0b, 0x62, 0x00, 0x52, 0xf2, 0xfd, 0xb7, 0xdb, 0xef, 0xcd, 0x7b, 0x3d, 0x3d, 0xdd, 0xef, + 0xbd, 0x7e, 0xfd, 0xde, 0x6b, 0x58, 0x6a, 0xb8, 0xf1, 0x66, 0x7b, 0x7d, 0xaa, 0x16, 0x34, 0xa7, + 0x9d, 0xb0, 0x11, 0xb4, 0xc2, 0xe0, 0x26, 0xfb, 0xf1, 0x74, 0xad, 0x3e, 0xbd, 0x7d, 0x7e, 0xba, + 0xb5, 0xd5, 0x98, 0x76, 0x5a, 0x6e, 0x34, 0xed, 0xb4, 0x5a, 0x9e, 0x5b, 0x73, 0x62, 0x37, 0xf0, + 0xa7, 0xb7, 0x9f, 0x71, 0xbc, 0xd6, 0xa6, 0xf3, 0xcc, 0x74, 0x83, 0xf8, 0x24, 0x74, 0x62, 0x52, + 0x9f, 0x6a, 0x85, 0x41, 0x1c, 0xa0, 0x1f, 0xd7, 0xd4, 0xa6, 0x24, 0x35, 0xf6, 0xe3, 0x95, 0x5a, + 0x7d, 0x6a, 0xfb, 0xfc, 0x54, 0x6b, 0xab, 0x31, 0x45, 0xa9, 0x4d, 0x19, 0xd4, 0xa6, 0x24, 0xb5, + 0xb3, 0x4f, 0x1b, 0x7d, 0x69, 0x04, 0x8d, 0x60, 0x9a, 0x11, 0x5d, 0x6f, 0x6f, 0xb0, 0x7f, 0xec, + 0x0f, 0xfb, 0xc5, 0x99, 0x9d, 0xb5, 0xb7, 0x9e, 0x8f, 0xa6, 0xdc, 0x80, 0x76, 0x6f, 0xba, 0x16, + 0x84, 0x64, 0x7a, 0xbb, 0xab, 0x43, 0x67, 0x2f, 0x69, 0x1c, 0xb2, 0x13, 0x13, 0x3f, 0x72, 0x03, + 0x3f, 0x7a, 0x9a, 0x76, 0x81, 0x84, 0xdb, 0x24, 0x34, 0x5f, 0xcf, 0x40, 0xc8, 0xa2, 0xf4, 0xac, + 0xa6, 0xd4, 0x74, 0x6a, 0x9b, 0xae, 0x4f, 0xc2, 0x8e, 0x7e, 0xbc, 0x49, 0x62, 0x27, 0xeb, 0xa9, + 0xe9, 0x5e, 0x4f, 0x85, 0x6d, 0x3f, 
0x76, 0x9b, 0xa4, 0xeb, 0x81, 0xb7, 0xed, 0xf7, 0x40, 0x54, + 0xdb, 0x24, 0x4d, 0xa7, 0xeb, 0xb9, 0x1f, 0xe9, 0xf5, 0x5c, 0x3b, 0x76, 0xbd, 0x69, 0xd7, 0x8f, + 0xa3, 0x38, 0x4c, 0x3f, 0x64, 0xff, 0xb2, 0x05, 0xa3, 0x33, 0x37, 0xaa, 0x33, 0xed, 0x78, 0x73, + 0x2e, 0xf0, 0x37, 0xdc, 0x06, 0x7a, 0x0e, 0x86, 0x6b, 0x5e, 0x3b, 0x8a, 0x49, 0x78, 0xd5, 0x69, + 0x92, 0x09, 0xeb, 0x9c, 0xf5, 0x64, 0x65, 0xf6, 0xd4, 0xd7, 0x77, 0x27, 0xdf, 0x74, 0x7b, 0x77, + 0x72, 0x78, 0x4e, 0x83, 0xb0, 0x89, 0x87, 0x7e, 0x08, 0x86, 0xc2, 0xc0, 0x23, 0x33, 0xf8, 0xea, + 0x44, 0x81, 0x3d, 0x32, 0x2e, 0x1e, 0x19, 0xc2, 0xbc, 0x19, 0x4b, 0x38, 0x45, 0x6d, 0x85, 0xc1, + 0x86, 0xeb, 0x91, 0x89, 0x62, 0x12, 0x75, 0x95, 0x37, 0x63, 0x09, 0xb7, 0xff, 0xb8, 0x00, 0x30, + 0xd3, 0x6a, 0xad, 0x86, 0xc1, 0x4d, 0x52, 0x8b, 0xd1, 0xfb, 0xa0, 0x4c, 0x87, 0xb9, 0xee, 0xc4, + 0x0e, 0xeb, 0xd8, 0xf0, 0xf9, 0x1f, 0x9e, 0xe2, 0x6f, 0x3d, 0x65, 0xbe, 0xb5, 0x9e, 0x64, 0x14, + 0x7b, 0x6a, 0xfb, 0x99, 0xa9, 0x95, 0x75, 0xfa, 0xfc, 0x32, 0x89, 0x9d, 0x59, 0x24, 0x98, 0x81, + 0x6e, 0xc3, 0x8a, 0x2a, 0xf2, 0x61, 0x20, 0x6a, 0x91, 0x1a, 0x7b, 0x87, 0xe1, 0xf3, 0x4b, 0x53, + 0x87, 0x99, 0xcd, 0x53, 0xba, 0xe7, 0xd5, 0x16, 0xa9, 0xcd, 0x8e, 0x08, 0xce, 0x03, 0xf4, 0x1f, + 0x66, 0x7c, 0xd0, 0x36, 0x0c, 0x46, 0xb1, 0x13, 0xb7, 0x23, 0x36, 0x14, 0xc3, 0xe7, 0xaf, 0xe6, + 0xc6, 0x91, 0x51, 0x9d, 0x1d, 0x13, 0x3c, 0x07, 0xf9, 0x7f, 0x2c, 0xb8, 0xd9, 0x7f, 0x66, 0xc1, + 0x98, 0x46, 0x5e, 0x72, 0xa3, 0x18, 0xfd, 0x64, 0xd7, 0xe0, 0x4e, 0xf5, 0x37, 0xb8, 0xf4, 0x69, + 0x36, 0xb4, 0x27, 0x04, 0xb3, 0xb2, 0x6c, 0x31, 0x06, 0xb6, 0x09, 0x25, 0x37, 0x26, 0xcd, 0x68, + 0xa2, 0x70, 0xae, 0xf8, 0xe4, 0xf0, 0xf9, 0x4b, 0x79, 0xbd, 0xe7, 0xec, 0xa8, 0x60, 0x5a, 0x5a, + 0xa4, 0xe4, 0x31, 0xe7, 0x62, 0xff, 0xf5, 0xa8, 0xf9, 0x7e, 0x74, 0xc0, 0xd1, 0x33, 0x30, 0x1c, + 0x05, 0xed, 0xb0, 0x46, 0x30, 0x69, 0x05, 0xd1, 0x84, 0x75, 0xae, 0x48, 0xa7, 0x1e, 0x9d, 0xd4, + 0x55, 0xdd, 0x8c, 0x4d, 0x1c, 0xf4, 0x69, 0x0b, 0x46, 0xea, 0x24, 0x8a, 
0x5d, 0x9f, 0xf1, 0x97, + 0x9d, 0x5f, 0x3b, 0x74, 0xe7, 0x65, 0xe3, 0xbc, 0x26, 0x3e, 0x7b, 0x5a, 0xbc, 0xc8, 0x88, 0xd1, + 0x18, 0xe1, 0x04, 0x7f, 0xba, 0x38, 0xeb, 0x24, 0xaa, 0x85, 0x6e, 0x8b, 0xfe, 0x17, 0xcb, 0x47, + 0x2d, 0xce, 0x79, 0x0d, 0xc2, 0x26, 0x1e, 0xf2, 0xa1, 0x44, 0x17, 0x5f, 0x34, 0x31, 0xc0, 0xfa, + 0xbf, 0x78, 0xb8, 0xfe, 0x8b, 0x41, 0xa5, 0xeb, 0x5a, 0x8f, 0x3e, 0xfd, 0x17, 0x61, 0xce, 0x06, + 0x7d, 0xca, 0x82, 0x09, 0x21, 0x1c, 0x30, 0xe1, 0x03, 0x7a, 0x63, 0xd3, 0x8d, 0x89, 0xe7, 0x46, + 0xf1, 0x44, 0x89, 0xf5, 0x61, 0xba, 0xbf, 0xb9, 0xb5, 0x10, 0x06, 0xed, 0xd6, 0x15, 0xd7, 0xaf, + 0xcf, 0x9e, 0x13, 0x9c, 0x26, 0xe6, 0x7a, 0x10, 0xc6, 0x3d, 0x59, 0xa2, 0xcf, 0x5b, 0x70, 0xd6, + 0x77, 0x9a, 0x24, 0x6a, 0x39, 0xf4, 0xd3, 0x72, 0xf0, 0xac, 0xe7, 0xd4, 0xb6, 0x58, 0x8f, 0x06, + 0xef, 0xae, 0x47, 0xb6, 0xe8, 0xd1, 0xd9, 0xab, 0x3d, 0x49, 0xe3, 0x3d, 0xd8, 0xa2, 0x2f, 0x5b, + 0x70, 0x32, 0x08, 0x5b, 0x9b, 0x8e, 0x4f, 0xea, 0x12, 0x1a, 0x4d, 0x0c, 0xb1, 0xa5, 0xf7, 0xde, + 0xc3, 0x7d, 0xa2, 0x95, 0x34, 0xd9, 0xe5, 0xc0, 0x77, 0xe3, 0x20, 0xac, 0x92, 0x38, 0x76, 0xfd, + 0x46, 0x34, 0x7b, 0xe6, 0xf6, 0xee, 0xe4, 0xc9, 0x2e, 0x2c, 0xdc, 0xdd, 0x1f, 0xf4, 0x53, 0x30, + 0x1c, 0x75, 0xfc, 0xda, 0x0d, 0xd7, 0xaf, 0x07, 0xb7, 0xa2, 0x89, 0x72, 0x1e, 0xcb, 0xb7, 0xaa, + 0x08, 0x8a, 0x05, 0xa8, 0x19, 0x60, 0x93, 0x5b, 0xf6, 0x87, 0xd3, 0x53, 0xa9, 0x92, 0xf7, 0x87, + 0xd3, 0x93, 0x69, 0x0f, 0xb6, 0xe8, 0xe7, 0x2c, 0x18, 0x8d, 0xdc, 0x86, 0xef, 0xc4, 0xed, 0x90, + 0x5c, 0x21, 0x9d, 0x68, 0x02, 0x58, 0x47, 0x2e, 0x1f, 0x72, 0x54, 0x0c, 0x92, 0xb3, 0x67, 0x44, + 0x1f, 0x47, 0xcd, 0xd6, 0x08, 0x27, 0xf9, 0x66, 0x2d, 0x34, 0x3d, 0xad, 0x87, 0xf3, 0x5d, 0x68, + 0x7a, 0x52, 0xf7, 0x64, 0x89, 0x7e, 0x02, 0x4e, 0xf0, 0x26, 0x35, 0xb2, 0xd1, 0xc4, 0x08, 0x13, + 0xb4, 0xa7, 0x6f, 0xef, 0x4e, 0x9e, 0xa8, 0xa6, 0x60, 0xb8, 0x0b, 0x1b, 0xbd, 0x0a, 0x93, 0x2d, + 0x12, 0x36, 0xdd, 0x78, 0xc5, 0xf7, 0x3a, 0x52, 0x7c, 0xd7, 0x82, 0x16, 0xa9, 0x8b, 0xee, 0x44, + 0x13, 0xa3, 
0xe7, 0xac, 0x27, 0xcb, 0xb3, 0x6f, 0x11, 0xdd, 0x9c, 0x5c, 0xdd, 0x1b, 0x1d, 0xef, + 0x47, 0x0f, 0xfd, 0xbe, 0x05, 0x67, 0x0d, 0x29, 0x5b, 0x25, 0xe1, 0xb6, 0x5b, 0x23, 0x33, 0xb5, + 0x5a, 0xd0, 0xf6, 0xe3, 0x68, 0x62, 0x8c, 0x0d, 0xe3, 0xfa, 0x51, 0xc8, 0xfc, 0x24, 0x2b, 0x3d, + 0x2f, 0x7b, 0xa2, 0x44, 0x78, 0x8f, 0x9e, 0xda, 0xff, 0xb6, 0x00, 0x27, 0xd2, 0x16, 0x00, 0xfa, + 0xc7, 0x16, 0x8c, 0xdf, 0xbc, 0x15, 0xaf, 0x05, 0x5b, 0xc4, 0x8f, 0x66, 0x3b, 0x54, 0x4e, 0x33, + 0xdd, 0x37, 0x7c, 0xbe, 0x96, 0xaf, 0xad, 0x31, 0x75, 0x39, 0xc9, 0xe5, 0x82, 0x1f, 0x87, 0x9d, + 0xd9, 0x07, 0xc5, 0x3b, 0x8d, 0x5f, 0xbe, 0xb1, 0x66, 0x42, 0x71, 0xba, 0x53, 0x67, 0x3f, 0x61, + 0xc1, 0xe9, 0x2c, 0x12, 0xe8, 0x04, 0x14, 0xb7, 0x48, 0x87, 0x5b, 0xa2, 0x98, 0xfe, 0x44, 0x2f, + 0x43, 0x69, 0xdb, 0xf1, 0xda, 0x44, 0x98, 0x69, 0x0b, 0x87, 0x7b, 0x11, 0xd5, 0x33, 0xcc, 0xa9, + 0xfe, 0x58, 0xe1, 0x79, 0xcb, 0xfe, 0xc3, 0x22, 0x0c, 0x1b, 0x1f, 0xed, 0x18, 0x4c, 0xcf, 0x20, + 0x61, 0x7a, 0x2e, 0xe7, 0x36, 0xdf, 0x7a, 0xda, 0x9e, 0xb7, 0x52, 0xb6, 0xe7, 0x4a, 0x7e, 0x2c, + 0xf7, 0x34, 0x3e, 0x51, 0x0c, 0x95, 0xa0, 0x45, 0xb7, 0x21, 0xd4, 0x86, 0x19, 0xc8, 0xe3, 0x13, + 0xae, 0x48, 0x72, 0xb3, 0xa3, 0xb7, 0x77, 0x27, 0x2b, 0xea, 0x2f, 0xd6, 0x8c, 0xec, 0x6f, 0x59, + 0x70, 0xda, 0xe8, 0xe3, 0x5c, 0xe0, 0xd7, 0x5d, 0xf6, 0x69, 0xcf, 0xc1, 0x40, 0xdc, 0x69, 0xc9, + 0xad, 0x8e, 0x1a, 0xa9, 0xb5, 0x4e, 0x8b, 0x60, 0x06, 0xa1, 0x3b, 0x96, 0x26, 0x89, 0x22, 0xa7, + 0x41, 0xd2, 0x9b, 0x9b, 0x65, 0xde, 0x8c, 0x25, 0x1c, 0x85, 0x80, 0x3c, 0x27, 0x8a, 0xd7, 0x42, + 0xc7, 0x8f, 0x18, 0xf9, 0x35, 0xb7, 0x49, 0xc4, 0x00, 0xff, 0x7f, 0xfd, 0xcd, 0x18, 0xfa, 0xc4, + 0xec, 0x03, 0xb7, 0x77, 0x27, 0xd1, 0x52, 0x17, 0x25, 0x9c, 0x41, 0xdd, 0xfe, 0xbc, 0x05, 0x0f, + 0x64, 0x0b, 0x18, 0xf4, 0x04, 0x0c, 0xf2, 0x7d, 0xae, 0x78, 0x3b, 0xfd, 0x49, 0x58, 0x2b, 0x16, + 0x50, 0x34, 0x0d, 0x15, 0xa5, 0xf0, 0xc4, 0x3b, 0x9e, 0x14, 0xa8, 0x15, 0xad, 0x25, 0x35, 0x0e, + 0x1d, 0x34, 0xfa, 0x47, 0x98, 0xa0, 0x6a, 0xd0, 
0xd8, 0xc6, 0x90, 0x41, 0xec, 0x6f, 0x5a, 0xf0, + 0xe6, 0x7e, 0xc4, 0xde, 0xd1, 0xf5, 0xb1, 0x0a, 0x67, 0xea, 0x64, 0xc3, 0x69, 0x7b, 0x71, 0x92, + 0xa3, 0xe8, 0xf4, 0xa3, 0xe2, 0xe1, 0x33, 0xf3, 0x59, 0x48, 0x38, 0xfb, 0x59, 0xfb, 0xbf, 0x58, + 0x30, 0x6e, 0xbc, 0xd6, 0x31, 0x6c, 0x9d, 0xfc, 0xe4, 0xd6, 0x69, 0x31, 0xb7, 0x65, 0xda, 0x63, + 0xef, 0xf4, 0x29, 0x0b, 0xce, 0x1a, 0x58, 0xcb, 0x4e, 0x5c, 0xdb, 0xbc, 0xb0, 0xd3, 0x0a, 0x49, + 0x14, 0xd1, 0x29, 0xf5, 0xa8, 0x21, 0x8e, 0x67, 0x87, 0x05, 0x85, 0xe2, 0x15, 0xd2, 0xe1, 0xb2, + 0xf9, 0x29, 0x28, 0xf3, 0x35, 0x17, 0x84, 0xe2, 0x23, 0xa9, 0x77, 0x5b, 0x11, 0xed, 0x58, 0x61, + 0x20, 0x1b, 0x06, 0x99, 0xcc, 0xa5, 0x32, 0x88, 0x9a, 0x09, 0x40, 0xbf, 0xfb, 0x75, 0xd6, 0x82, + 0x05, 0xc4, 0x8e, 0x12, 0xdd, 0x59, 0x0d, 0x09, 0x9b, 0x0f, 0xf5, 0x8b, 0x2e, 0xf1, 0xea, 0x11, + 0xdd, 0xd6, 0x39, 0xbe, 0x1f, 0xc4, 0x62, 0x87, 0x66, 0x6c, 0xeb, 0x66, 0x74, 0x33, 0x36, 0x71, + 0x28, 0x53, 0xcf, 0x59, 0x27, 0x1e, 0x1f, 0x51, 0xc1, 0x74, 0x89, 0xb5, 0x60, 0x01, 0xb1, 0x6f, + 0x17, 0xd8, 0x06, 0x52, 0x49, 0x34, 0x72, 0x1c, 0xde, 0x87, 0x30, 0xa1, 0x02, 0x56, 0xf3, 0x93, + 0xc7, 0xa4, 0xb7, 0x07, 0xe2, 0xb5, 0x94, 0x16, 0xc0, 0xb9, 0x72, 0xdd, 0xdb, 0x0b, 0xf1, 0xa1, + 0x22, 0x4c, 0x26, 0x1f, 0xe8, 0x52, 0x22, 0x74, 0xcb, 0x6b, 0x30, 0x4a, 0xfb, 0xa3, 0x0c, 0x7c, + 0x6c, 0xe2, 0xf5, 0x90, 0xc3, 0x85, 0xa3, 0x94, 0xc3, 0xa6, 0x9a, 0x28, 0xee, 0xa3, 0x26, 0x9e, + 0x50, 0xa3, 0x3e, 0x90, 0x92, 0x79, 0x49, 0x55, 0x79, 0x0e, 0x06, 0xa2, 0x98, 0xb4, 0x26, 0x4a, + 0x49, 0x31, 0x5b, 0x8d, 0x49, 0x0b, 0x33, 0x08, 0x7a, 0x3b, 0x8c, 0xc7, 0x4e, 0xd8, 0x20, 0x71, + 0x48, 0xb6, 0x5d, 0xe6, 0xbb, 0x64, 0xfb, 0xd9, 0xca, 0xec, 0x29, 0x6a, 0x75, 0xad, 0x31, 0x10, + 0x96, 0x20, 0x9c, 0xc6, 0xb5, 0xff, 0x7b, 0x01, 0x1e, 0x4c, 0x7e, 0x02, 0xad, 0x18, 0xdf, 0x99, + 0x50, 0x8c, 0x6f, 0x35, 0x15, 0xe3, 0x9d, 0xdd, 0xc9, 0x87, 0x7b, 0x3c, 0xf6, 0x3d, 0xa3, 0x37, + 0xd1, 0x42, 0xea, 0x23, 0x4c, 0x27, 0x3f, 0xc2, 0x9d, 0xdd, 0xc9, 0x47, 0x7b, 0xbc, 
0x63, 0xea, + 0x2b, 0x3d, 0x01, 0x83, 0x21, 0x71, 0xa2, 0xc0, 0x17, 0xdf, 0x49, 0x7d, 0x4d, 0xcc, 0x5a, 0xb1, + 0x80, 0xda, 0xdf, 0xa8, 0xa4, 0x07, 0x7b, 0x81, 0xfb, 0x63, 0x83, 0x10, 0xb9, 0x30, 0xc0, 0x76, + 0x6d, 0x5c, 0xb2, 0x5c, 0x39, 0xdc, 0x2a, 0xa4, 0x5a, 0x44, 0x91, 0x9e, 0x2d, 0xd3, 0xaf, 0x46, + 0x9b, 0x30, 0x63, 0x81, 0x76, 0xa0, 0x5c, 0x93, 0x9b, 0xa9, 0x42, 0x1e, 0x6e, 0x47, 0xb1, 0x95, + 0xd2, 0x1c, 0x47, 0xa8, 0xb8, 0x57, 0x3b, 0x30, 0xc5, 0x0d, 0x11, 0x28, 0x36, 0xdc, 0x58, 0x7c, + 0xd6, 0x43, 0x6e, 0x97, 0x17, 0x5c, 0xe3, 0x15, 0x87, 0xa8, 0x0e, 0x5a, 0x70, 0x63, 0x4c, 0xe9, + 0xa3, 0x8f, 0x59, 0x30, 0x1c, 0xd5, 0x9a, 0xab, 0x61, 0xb0, 0xed, 0xd6, 0x49, 0x28, 0x6c, 0xcc, + 0x43, 0x4a, 0xb6, 0xea, 0xdc, 0xb2, 0x24, 0xa8, 0xf9, 0x72, 0xf7, 0x85, 0x86, 0x60, 0x93, 0x2f, + 0xdd, 0x7b, 0x3d, 0x28, 0xde, 0x7d, 0x9e, 0xd4, 0xd8, 0x8a, 0x93, 0x7b, 0x66, 0x36, 0x53, 0x0e, + 0x6d, 0x73, 0xcf, 0xb7, 0x6b, 0x5b, 0x74, 0xbd, 0xe9, 0x0e, 0x3d, 0x7c, 0x7b, 0x77, 0xf2, 0xc1, + 0xb9, 0x6c, 0x9e, 0xb8, 0x57, 0x67, 0xd8, 0x80, 0xb5, 0xda, 0x9e, 0x87, 0xc9, 0xab, 0x6d, 0xc2, + 0x3c, 0x62, 0x39, 0x0c, 0xd8, 0xaa, 0x26, 0x98, 0x1a, 0x30, 0x03, 0x82, 0x4d, 0xbe, 0xe8, 0x55, + 0x18, 0x6c, 0x3a, 0x71, 0xe8, 0xee, 0x08, 0x37, 0xd8, 0x21, 0x77, 0x41, 0xcb, 0x8c, 0x96, 0x66, + 0xce, 0x14, 0x3d, 0x6f, 0xc4, 0x82, 0x11, 0x6a, 0x42, 0xa9, 0x49, 0xc2, 0x06, 0x99, 0x28, 0xe7, + 0xe1, 0xf2, 0x5f, 0xa6, 0xa4, 0x34, 0xc3, 0x0a, 0x35, 0xae, 0x58, 0x1b, 0xe6, 0x5c, 0xd0, 0xcb, + 0x50, 0x8e, 0x88, 0x47, 0x6a, 0xd4, 0x3c, 0xaa, 0x30, 0x8e, 0x3f, 0xd2, 0xa7, 0xa9, 0x48, 0xed, + 0x92, 0xaa, 0x78, 0x94, 0x2f, 0x30, 0xf9, 0x0f, 0x2b, 0x92, 0x74, 0x00, 0x5b, 0x5e, 0xbb, 0xe1, + 0xfa, 0x13, 0x90, 0xc7, 0x00, 0xae, 0x32, 0x5a, 0xa9, 0x01, 0xe4, 0x8d, 0x58, 0x30, 0xb2, 0xff, + 0x9b, 0x05, 0x28, 0x29, 0xd4, 0x8e, 0xc1, 0x26, 0x7e, 0x35, 0x69, 0x13, 0x2f, 0xe5, 0x69, 0xb4, + 0xf4, 0x30, 0x8b, 0x7f, 0xb3, 0x02, 0x29, 0x75, 0x70, 0x95, 0x44, 0x31, 0xa9, 0xbf, 0x21, 0xc2, + 0xdf, 0x10, 0xe1, 0x6f, 
0x88, 0x70, 0x25, 0xc2, 0xd7, 0x53, 0x22, 0xfc, 0x1d, 0xc6, 0xaa, 0xd7, + 0xe7, 0xeb, 0xaf, 0xa8, 0x03, 0x78, 0xb3, 0x07, 0x06, 0x02, 0x95, 0x04, 0x97, 0xab, 0x2b, 0x57, + 0x33, 0x65, 0xf6, 0x2b, 0x49, 0x99, 0x7d, 0x58, 0x16, 0x7f, 0x1b, 0xa4, 0xf4, 0xef, 0x5b, 0xf0, + 0x96, 0xa4, 0xf4, 0x92, 0x33, 0x67, 0xb1, 0xe1, 0x07, 0x21, 0x99, 0x77, 0x37, 0x36, 0x48, 0x48, + 0xfc, 0x1a, 0x89, 0x94, 0x6f, 0xc7, 0xea, 0xe5, 0xdb, 0x41, 0xcf, 0xc2, 0xc8, 0xcd, 0x28, 0xf0, + 0x57, 0x03, 0xd7, 0x17, 0x22, 0x88, 0xee, 0x38, 0x4e, 0xdc, 0xde, 0x9d, 0x1c, 0xa1, 0x23, 0x2a, + 0xdb, 0x71, 0x02, 0x0b, 0xcd, 0xc1, 0xc9, 0x9b, 0xaf, 0xae, 0x3a, 0xb1, 0xe1, 0x4d, 0x90, 0xfb, + 0x7e, 0x76, 0x1e, 0x75, 0xf9, 0xc5, 0x14, 0x10, 0x77, 0xe3, 0xdb, 0xff, 0xa0, 0x00, 0x0f, 0xa5, + 0x5e, 0x24, 0xf0, 0xbc, 0xa0, 0x1d, 0xd3, 0x3d, 0x11, 0xfa, 0xa2, 0x05, 0x27, 0x9a, 0x49, 0x87, + 0x45, 0x24, 0xdc, 0xdd, 0xef, 0xca, 0x4d, 0x47, 0xa4, 0x3c, 0x22, 0xb3, 0x13, 0x62, 0x84, 0x4e, + 0xa4, 0x00, 0x11, 0xee, 0xea, 0x0b, 0x7a, 0x19, 0x2a, 0x4d, 0x67, 0xe7, 0x5a, 0xab, 0xee, 0xc4, + 0x72, 0x3b, 0xda, 0xdb, 0x8b, 0xd0, 0x8e, 0x5d, 0x6f, 0x8a, 0x47, 0x6e, 0x4c, 0x2d, 0xfa, 0xf1, + 0x4a, 0x58, 0x8d, 0x43, 0xd7, 0x6f, 0x70, 0x27, 0xe7, 0xb2, 0x24, 0x83, 0x35, 0x45, 0xfb, 0x0b, + 0x56, 0x5a, 0x49, 0xa9, 0xd1, 0x09, 0x9d, 0x98, 0x34, 0x3a, 0xe8, 0xfd, 0x50, 0xa2, 0xfb, 0x46, + 0x39, 0x2a, 0x37, 0xf2, 0xd4, 0x9c, 0xc6, 0x97, 0xd0, 0x4a, 0x94, 0xfe, 0x8b, 0x30, 0x67, 0x6a, + 0x7f, 0xb1, 0x92, 0x36, 0x16, 0xd8, 0xd9, 0xfc, 0x79, 0x80, 0x46, 0xb0, 0x46, 0x9a, 0x2d, 0x8f, + 0x0e, 0x8b, 0xc5, 0x0e, 0x78, 0x94, 0xab, 0x64, 0x41, 0x41, 0xb0, 0x81, 0x85, 0x7e, 0xde, 0x02, + 0x68, 0xc8, 0x39, 0x2f, 0x0d, 0x81, 0x6b, 0x79, 0xbe, 0x8e, 0x5e, 0x51, 0xba, 0x2f, 0x8a, 0x21, + 0x36, 0x98, 0xa3, 0x9f, 0xb1, 0xa0, 0x1c, 0xcb, 0xee, 0x73, 0xd5, 0xb8, 0x96, 0x67, 0x4f, 0xe4, + 0x4b, 0x6b, 0x9b, 0x48, 0x0d, 0x89, 0xe2, 0x8b, 0x7e, 0xd6, 0x02, 0x88, 0x3a, 0x7e, 0x6d, 0x35, + 0xf0, 0xdc, 0x5a, 0x47, 0x68, 0xcc, 0xeb, 0xb9, 0xba, 0x73, 
0x14, 0xf5, 0xd9, 0x31, 0x3a, 0x1a, + 0xfa, 0x3f, 0x36, 0x38, 0xa3, 0x0f, 0x42, 0x39, 0x12, 0xd3, 0x4d, 0xe8, 0xc8, 0xb5, 0x7c, 0x9d, + 0x4a, 0x9c, 0xb6, 0x10, 0xaf, 0xe2, 0x1f, 0x56, 0x3c, 0xd1, 0x2f, 0x5a, 0x30, 0xde, 0x4a, 0xba, + 0x09, 0x85, 0x3a, 0xcc, 0x4f, 0x06, 0xa4, 0xdc, 0x90, 0xdc, 0xdb, 0x92, 0x6a, 0xc4, 0xe9, 0x5e, + 0x50, 0x09, 0xa8, 0x67, 0xf0, 0x4a, 0x8b, 0xbb, 0x2c, 0x87, 0xb4, 0x04, 0x5c, 0x48, 0x03, 0x71, + 0x37, 0x3e, 0x5a, 0x85, 0xd3, 0xb4, 0x77, 0x1d, 0x6e, 0x7e, 0x4a, 0xf5, 0x12, 0x31, 0x65, 0x58, + 0x9e, 0x7d, 0x44, 0xcc, 0x10, 0x76, 0xd6, 0x91, 0xc6, 0xc1, 0x99, 0x4f, 0xa2, 0x3f, 0xb4, 0xe0, + 0x11, 0x97, 0xa9, 0x01, 0xd3, 0x61, 0xaf, 0x35, 0x82, 0x38, 0x68, 0x27, 0xb9, 0xca, 0x8a, 0x5e, + 0xea, 0x67, 0xf6, 0xcd, 0xe2, 0x0d, 0x1e, 0x59, 0xdc, 0xa3, 0x4b, 0x78, 0xcf, 0x0e, 0xa3, 0x1f, + 0x85, 0x51, 0xb9, 0x2e, 0x56, 0xa9, 0x08, 0x66, 0x8a, 0xb6, 0x32, 0x7b, 0xf2, 0xf6, 0xee, 0xe4, + 0xe8, 0x9a, 0x09, 0xc0, 0x49, 0x3c, 0xfb, 0xdf, 0x15, 0x13, 0xa7, 0x44, 0xca, 0x87, 0xc9, 0xc4, + 0x4d, 0x4d, 0xfa, 0x7f, 0xa4, 0xf4, 0xcc, 0x55, 0xdc, 0x28, 0xef, 0x92, 0x16, 0x37, 0xaa, 0x29, + 0xc2, 0x06, 0x73, 0x6a, 0x94, 0x9e, 0x74, 0xd2, 0x9e, 0x52, 0x21, 0x01, 0x5f, 0xce, 0xb3, 0x4b, + 0xdd, 0x67, 0x7a, 0x0f, 0x89, 0xae, 0x9d, 0xec, 0x02, 0xe1, 0xee, 0x2e, 0xa1, 0x0f, 0x40, 0x25, + 0x54, 0x91, 0x2d, 0xc5, 0x3c, 0xb6, 0x6a, 0x72, 0xda, 0x88, 0xee, 0xa8, 0x03, 0x20, 0x1d, 0xc3, + 0xa2, 0x39, 0xda, 0x7f, 0x90, 0x3c, 0x18, 0x33, 0x64, 0x47, 0x1f, 0x87, 0x7e, 0x9f, 0xb6, 0x60, + 0x38, 0x0c, 0x3c, 0xcf, 0xf5, 0x1b, 0x54, 0xce, 0x09, 0x65, 0xfd, 0x9e, 0x23, 0xd1, 0x97, 0x42, + 0xa0, 0x31, 0xcb, 0x1a, 0x6b, 0x9e, 0xd8, 0xec, 0x80, 0xfd, 0x67, 0x16, 0x4c, 0xf4, 0x92, 0xc7, + 0x88, 0xc0, 0xc3, 0x52, 0xd8, 0xa8, 0xa1, 0x58, 0xf1, 0xe7, 0x89, 0x47, 0x94, 0xdb, 0xbc, 0x3c, + 0xfb, 0xb8, 0x78, 0xcd, 0x87, 0x57, 0x7b, 0xa3, 0xe2, 0xbd, 0xe8, 0xa0, 0x97, 0xe0, 0x84, 0xf1, + 0x5e, 0x91, 0x1a, 0x98, 0xca, 0xec, 0x14, 0x35, 0x80, 0x66, 0x52, 0xb0, 0x3b, 0xbb, 0x93, 0x0f, + 
0xa4, 0xdb, 0x84, 0xc2, 0xe8, 0xa2, 0x63, 0x7f, 0xa5, 0x90, 0xfe, 0x5a, 0x4a, 0xd7, 0xbf, 0x6e, + 0x75, 0x79, 0x13, 0xde, 0x75, 0x14, 0xfa, 0x95, 0xf9, 0x1d, 0x54, 0x18, 0x46, 0x6f, 0x9c, 0x7b, + 0x78, 0x6c, 0x6f, 0xff, 0xfb, 0x01, 0xd8, 0xa3, 0x67, 0x7d, 0x18, 0xef, 0x07, 0x3e, 0x47, 0xfd, + 0xa4, 0xa5, 0x0e, 0xcc, 0xf8, 0x1a, 0xae, 0x1f, 0xd5, 0xd8, 0xf3, 0xfd, 0x53, 0xc4, 0x43, 0x47, + 0x94, 0x17, 0x3d, 0x79, 0x34, 0x87, 0xbe, 0x64, 0x25, 0x8f, 0xfc, 0x78, 0x50, 0xa3, 0x7b, 0x64, + 0x7d, 0x32, 0xce, 0x11, 0x79, 0xc7, 0xf4, 0xe9, 0x53, 0xaf, 0x13, 0xc6, 0x29, 0x80, 0x0d, 0xd7, + 0x77, 0x3c, 0xf7, 0x35, 0xba, 0x3b, 0x2a, 0x31, 0x05, 0xcf, 0x2c, 0xa6, 0x8b, 0xaa, 0x15, 0x1b, + 0x18, 0x67, 0xff, 0x7f, 0x18, 0x36, 0xde, 0x3c, 0x23, 0xe2, 0xe5, 0xb4, 0x19, 0xf1, 0x52, 0x31, + 0x02, 0x55, 0xce, 0xbe, 0x03, 0x4e, 0xa4, 0x3b, 0x78, 0x90, 0xe7, 0xed, 0xff, 0x3d, 0x94, 0x3e, + 0x83, 0x5b, 0x23, 0x61, 0x93, 0x76, 0xed, 0x0d, 0xc7, 0xd6, 0x1b, 0x8e, 0xad, 0x37, 0x1c, 0x5b, + 0xe6, 0xd9, 0x84, 0x70, 0xda, 0x0c, 0x1d, 0x93, 0xd3, 0x26, 0xe1, 0x86, 0x2a, 0xe7, 0xee, 0x86, + 0xb2, 0x3f, 0xd6, 0xe5, 0xb9, 0x5f, 0x0b, 0x09, 0x41, 0x01, 0x94, 0xfc, 0xa0, 0x4e, 0xa4, 0x8d, + 0x7b, 0x39, 0x1f, 0x83, 0xed, 0x6a, 0x50, 0x37, 0xc2, 0xc5, 0xe9, 0xbf, 0x08, 0x73, 0x3e, 0xf6, + 0x47, 0x07, 0x21, 0x61, 0x4e, 0xf2, 0xef, 0xfe, 0x43, 0x30, 0x14, 0x92, 0x56, 0x70, 0x0d, 0x2f, + 0x09, 0x5d, 0xa6, 0x33, 0x4a, 0x78, 0x33, 0x96, 0x70, 0xaa, 0xf3, 0x5a, 0x4e, 0xbc, 0x29, 0x94, + 0x99, 0xd2, 0x79, 0xab, 0x4e, 0xbc, 0x89, 0x19, 0x04, 0xbd, 0x03, 0xc6, 0xe2, 0xc4, 0x51, 0xb8, + 0x38, 0xf2, 0x7d, 0x40, 0xe0, 0x8e, 0x25, 0x0f, 0xca, 0x71, 0x0a, 0x1b, 0xbd, 0x0a, 0x03, 0x9b, + 0xc4, 0x6b, 0x8a, 0x4f, 0x5f, 0xcd, 0x4f, 0xd7, 0xb0, 0x77, 0xbd, 0x44, 0xbc, 0x26, 0x97, 0x84, + 0xf4, 0x17, 0x66, 0xac, 0xe8, 0xbc, 0xaf, 0x6c, 0xb5, 0xa3, 0x38, 0x68, 0xba, 0xaf, 0x49, 0x4f, + 0xe7, 0xbb, 0x72, 0x66, 0x7c, 0x45, 0xd2, 0xe7, 0x2e, 0x25, 0xf5, 0x17, 0x6b, 0xce, 0xac, 0x1f, + 0x75, 0x37, 0x64, 0x53, 0xa6, 0x23, 
0x1c, 0x96, 0x79, 0xf7, 0x63, 0x5e, 0xd2, 0xe7, 0xfd, 0x50, + 0x7f, 0xb1, 0xe6, 0x8c, 0x3a, 0x6a, 0xfd, 0x0d, 0xb3, 0x3e, 0x5c, 0xcb, 0xb9, 0x0f, 0x7c, 0xed, + 0x65, 0xae, 0xc3, 0xc7, 0xa1, 0x54, 0xdb, 0x74, 0xc2, 0x78, 0x62, 0x84, 0x4d, 0x1a, 0x35, 0x8b, + 0xe7, 0x68, 0x23, 0xe6, 0x30, 0xf4, 0x28, 0x14, 0x43, 0xb2, 0xc1, 0xa2, 0x93, 0x8d, 0xb8, 0x28, + 0x4c, 0x36, 0x30, 0x6d, 0x57, 0x76, 0xd9, 0x58, 0xcf, 0x80, 0xb9, 0x5f, 0x29, 0x24, 0x0d, 0xbb, + 0xe4, 0xc8, 0xf0, 0xf5, 0x50, 0x6b, 0x87, 0x91, 0x74, 0x90, 0x19, 0xeb, 0x81, 0x35, 0x63, 0x09, + 0x47, 0x1f, 0xb6, 0x60, 0xe8, 0x66, 0x14, 0xf8, 0x3e, 0x89, 0x85, 0x12, 0xbd, 0x9e, 0xf3, 0x60, + 0x5d, 0xe6, 0xd4, 0x75, 0x1f, 0x44, 0x03, 0x96, 0x7c, 0x69, 0x77, 0xc9, 0x4e, 0xcd, 0x6b, 0xd7, + 0xbb, 0x82, 0x61, 0x2e, 0xf0, 0x66, 0x2c, 0xe1, 0x14, 0xd5, 0xf5, 0x39, 0xea, 0x40, 0x12, 0x75, + 0xd1, 0x17, 0xa8, 0x02, 0x6e, 0xff, 0x7a, 0x19, 0xce, 0x64, 0x2e, 0x1f, 0x6a, 0x72, 0x31, 0xa3, + 0xe6, 0xa2, 0xeb, 0x11, 0x19, 0x06, 0xc6, 0x4c, 0xae, 0xeb, 0xaa, 0x15, 0x1b, 0x18, 0xe8, 0xa7, + 0x01, 0x5a, 0x4e, 0xe8, 0x34, 0x89, 0x72, 0x60, 0x1f, 0xda, 0xb2, 0xa1, 0xfd, 0x58, 0x95, 0x34, + 0xf5, 0x26, 0x5e, 0x35, 0x45, 0xd8, 0x60, 0x89, 0x9e, 0x83, 0xe1, 0x90, 0x78, 0xc4, 0x89, 0x58, + 0xf8, 0x7b, 0x3a, 0x97, 0x07, 0x6b, 0x10, 0x36, 0xf1, 0xd0, 0x13, 0x2a, 0x62, 0x2e, 0x15, 0x39, + 0x94, 0x8c, 0x9a, 0x43, 0x9f, 0xb1, 0x60, 0x6c, 0xc3, 0xf5, 0x88, 0xe6, 0x2e, 0x32, 0x6f, 0x56, + 0x0e, 0xff, 0x92, 0x17, 0x4d, 0xba, 0x5a, 0x86, 0x26, 0x9a, 0x23, 0x9c, 0x62, 0x4f, 0x3f, 0xf3, + 0x36, 0x09, 0x99, 0xf0, 0x1d, 0x4c, 0x7e, 0xe6, 0xeb, 0xbc, 0x19, 0x4b, 0x38, 0x9a, 0x81, 0xf1, + 0x96, 0x13, 0x45, 0x73, 0x21, 0xa9, 0x13, 0x3f, 0x76, 0x1d, 0x8f, 0xe7, 0xc5, 0x94, 0x75, 0x38, + 0xf9, 0x6a, 0x12, 0x8c, 0xd3, 0xf8, 0xe8, 0xdd, 0xf0, 0x20, 0xf7, 0x10, 0x2d, 0xbb, 0x51, 0xe4, + 0xfa, 0x0d, 0x3d, 0x0d, 0x84, 0xa3, 0x6c, 0x52, 0x90, 0x7a, 0x70, 0x31, 0x1b, 0x0d, 0xf7, 0x7a, + 0x1e, 0x3d, 0x05, 0xe5, 0x68, 0xcb, 0x6d, 0xcd, 0x85, 0xf5, 0x88, 0x9d, 
0x0e, 0x95, 0xb5, 0x5b, + 0xb6, 0x2a, 0xda, 0xb1, 0xc2, 0x40, 0x35, 0x18, 0xe1, 0x9f, 0x84, 0x87, 0xfc, 0x09, 0x09, 0xfa, + 0x74, 0x4f, 0x45, 0x2e, 0xd2, 0x3c, 0xa7, 0xb0, 0x73, 0xeb, 0x82, 0x3c, 0xab, 0xe2, 0x47, 0x2b, + 0xd7, 0x0d, 0x32, 0x38, 0x41, 0x34, 0xb9, 0xa7, 0x1b, 0xee, 0x63, 0x4f, 0xf7, 0x1c, 0x0c, 0x6f, + 0xb5, 0xd7, 0x89, 0x18, 0x79, 0x21, 0xd8, 0xd4, 0xec, 0xbb, 0xa2, 0x41, 0xd8, 0xc4, 0x63, 0xd1, + 0x96, 0x2d, 0x57, 0xfc, 0x8b, 0x26, 0x46, 0x8d, 0x68, 0xcb, 0xd5, 0x45, 0xd9, 0x8c, 0x4d, 0x1c, + 0xda, 0x35, 0x3a, 0x16, 0x6b, 0x24, 0x62, 0xc9, 0x14, 0x74, 0xb8, 0x54, 0xd7, 0xaa, 0x12, 0x80, + 0x35, 0x0e, 0x5a, 0x85, 0xd3, 0xf4, 0x4f, 0x95, 0xa5, 0xb9, 0x5e, 0x77, 0x3c, 0xb7, 0xce, 0x43, + 0xff, 0xc6, 0x93, 0xfe, 0xcd, 0x6a, 0x06, 0x0e, 0xce, 0x7c, 0xd2, 0xfe, 0xa5, 0x42, 0xd2, 0x73, + 0x62, 0x8a, 0x30, 0x14, 0x51, 0x41, 0x15, 0x5f, 0x77, 0x42, 0x69, 0xf0, 0x1c, 0x32, 0xb9, 0x49, + 0xd0, 0xbd, 0xee, 0x84, 0xa6, 0xc8, 0x63, 0x0c, 0xb0, 0xe4, 0x84, 0x6e, 0xc2, 0x40, 0xec, 0x39, + 0x39, 0x65, 0x43, 0x1a, 0x1c, 0xb5, 0x23, 0x6b, 0x69, 0x26, 0xc2, 0x8c, 0x07, 0x7a, 0x84, 0xee, + 0xde, 0xd6, 0xe5, 0x49, 0x9b, 0xd8, 0x70, 0xad, 0x47, 0x98, 0xb5, 0xda, 0x7f, 0x3e, 0x9c, 0xa1, + 0x75, 0x94, 0x21, 0x80, 0xce, 0x03, 0xd0, 0x49, 0xb3, 0x1a, 0x92, 0x0d, 0x77, 0x47, 0x18, 0x62, + 0x4a, 0xb2, 0x5d, 0x55, 0x10, 0x6c, 0x60, 0xc9, 0x67, 0xaa, 0xed, 0x0d, 0xfa, 0x4c, 0xa1, 0xfb, + 0x19, 0x0e, 0xc1, 0x06, 0x16, 0x7a, 0x16, 0x06, 0xdd, 0xa6, 0xd3, 0x50, 0x81, 0xc0, 0x8f, 0x50, + 0x91, 0xb6, 0xc8, 0x5a, 0xee, 0xec, 0x4e, 0x8e, 0xa9, 0x0e, 0xb1, 0x26, 0x2c, 0x70, 0xd1, 0x57, + 0x2c, 0x18, 0xa9, 0x05, 0xcd, 0x66, 0xe0, 0xf3, 0xed, 0xb3, 0xf0, 0x05, 0xdc, 0x3c, 0x2a, 0x33, + 0x69, 0x6a, 0xce, 0x60, 0xc6, 0x9d, 0x01, 0x2a, 0x6d, 0xd3, 0x04, 0xe1, 0x44, 0xaf, 0x4c, 0xc9, + 0x57, 0xda, 0x47, 0xf2, 0xfd, 0x86, 0x05, 0x27, 0xf9, 0xb3, 0xc6, 0xae, 0x5e, 0x64, 0x28, 0x06, + 0x47, 0xfc, 0x5a, 0x5d, 0x8e, 0x0e, 0xe5, 0xec, 0xed, 0x82, 0xe3, 0xee, 0x4e, 0xa2, 0x05, 0x38, + 0xb9, 0x11, 
0x84, 0x35, 0x62, 0x0e, 0x84, 0x10, 0xdb, 0x8a, 0xd0, 0xc5, 0x34, 0x02, 0xee, 0x7e, + 0x06, 0x5d, 0x87, 0x07, 0x8c, 0x46, 0x73, 0x1c, 0xb8, 0xe4, 0x7e, 0x4c, 0x50, 0x7b, 0xe0, 0x62, + 0x26, 0x16, 0xee, 0xf1, 0x74, 0x52, 0x48, 0x56, 0xfa, 0x10, 0x92, 0xaf, 0xc0, 0x43, 0xb5, 0xee, + 0x91, 0xd9, 0x8e, 0xda, 0xeb, 0x11, 0x97, 0xe3, 0xe5, 0xd9, 0x1f, 0x10, 0x04, 0x1e, 0x9a, 0xeb, + 0x85, 0x88, 0x7b, 0xd3, 0x40, 0xef, 0x87, 0x72, 0x48, 0xd8, 0x57, 0x89, 0x44, 0xba, 0xde, 0x21, + 0xbd, 0x1d, 0xda, 0x82, 0xe7, 0x64, 0xb5, 0x66, 0x12, 0x0d, 0x11, 0x56, 0x1c, 0xd1, 0x2d, 0x18, + 0x6a, 0x39, 0x71, 0x6d, 0x53, 0x24, 0xe9, 0x1d, 0xda, 0x37, 0xaf, 0x98, 0xb3, 0xa3, 0x14, 0x23, + 0xad, 0x9f, 0x33, 0xc1, 0x92, 0x1b, 0xb5, 0xd5, 0x6a, 0x41, 0xb3, 0x15, 0xf8, 0xc4, 0x8f, 0xa5, + 0x12, 0x19, 0xe3, 0xe7, 0x1d, 0xb2, 0x15, 0x1b, 0x18, 0x54, 0x23, 0x30, 0xdf, 0xdf, 0x0d, 0x37, + 0xde, 0x0c, 0xda, 0xb1, 0xdc, 0xca, 0x0a, 0x6d, 0xa2, 0x34, 0xc2, 0x52, 0x06, 0x0e, 0xce, 0x7c, + 0x32, 0xad, 0xfe, 0xc6, 0xef, 0x4e, 0xfd, 0x9d, 0xd8, 0x5f, 0xfd, 0x9d, 0x7d, 0x27, 0x9c, 0xec, + 0x12, 0x1a, 0x07, 0x72, 0xf0, 0xcd, 0xc3, 0x03, 0xd9, 0xcb, 0xf3, 0x40, 0x6e, 0xbe, 0x5f, 0x4f, + 0xc5, 0x79, 0x1b, 0x5b, 0x9e, 0x3e, 0x5c, 0xc6, 0x0e, 0x14, 0x89, 0xbf, 0x2d, 0xb4, 0xd5, 0xc5, + 0xc3, 0xcd, 0x92, 0x0b, 0xfe, 0x36, 0x97, 0x2e, 0xcc, 0x2f, 0x76, 0xc1, 0xdf, 0xc6, 0x94, 0x36, + 0xfa, 0x9c, 0x95, 0x30, 0xc8, 0xb9, 0xa3, 0xf9, 0xbd, 0x47, 0xb2, 0xc7, 0xeb, 0xdb, 0x46, 0xb7, + 0xff, 0x43, 0x01, 0xce, 0xed, 0x47, 0xa4, 0x8f, 0xe1, 0x7b, 0x1c, 0x06, 0x23, 0x16, 0xb9, 0x21, + 0xc4, 0xff, 0x30, 0x5d, 0x15, 0x3c, 0x96, 0xe3, 0x15, 0x2c, 0x40, 0xc8, 0x83, 0x62, 0xd3, 0x69, + 0x09, 0xff, 0xe3, 0xe2, 0x61, 0xf3, 0xe1, 0xe8, 0x7f, 0xc7, 0x5b, 0x76, 0x5a, 0x7c, 0x7a, 0x1a, + 0x0d, 0x98, 0xb2, 0x41, 0x31, 0x94, 0x9c, 0x30, 0x74, 0x64, 0x98, 0xc0, 0x95, 0x7c, 0xf8, 0xcd, + 0x50, 0x92, 0xfc, 0x94, 0x35, 0xd1, 0x84, 0x39, 0x33, 0xfb, 0x17, 0xcb, 0x89, 0xe4, 0x29, 0x16, + 0xfb, 0x11, 0xc1, 0xa0, 0x70, 0x3b, 0x5a, 0x79, 
0xa7, 0x21, 0xf2, 0xec, 0x64, 0xb6, 0xa3, 0x17, + 0x35, 0x1e, 0x04, 0x2b, 0xf4, 0x09, 0x8b, 0x55, 0x52, 0x90, 0x19, 0x69, 0x62, 0x97, 0x7c, 0x34, + 0x85, 0x1d, 0xcc, 0xfa, 0x0c, 0xb2, 0x11, 0x9b, 0xdc, 0x45, 0x45, 0x14, 0xb6, 0x3b, 0xe8, 0xae, + 0x88, 0xc2, 0xac, 0x7d, 0x09, 0x47, 0x3b, 0x19, 0x31, 0x1e, 0x39, 0x64, 0xe3, 0xf7, 0x11, 0xd5, + 0xf1, 0x25, 0x0b, 0x4e, 0xba, 0xe9, 0xc3, 0x7a, 0xb1, 0xa7, 0xbc, 0x91, 0x8f, 0x8f, 0xb0, 0x3b, + 0x16, 0x40, 0x19, 0x0e, 0x5d, 0x20, 0xdc, 0xdd, 0x19, 0x54, 0x87, 0x01, 0xd7, 0xdf, 0x08, 0x84, + 0xb9, 0x34, 0x7b, 0xb8, 0x4e, 0x2d, 0xfa, 0x1b, 0x81, 0x5e, 0xcd, 0xf4, 0x1f, 0x66, 0xd4, 0xd1, + 0x12, 0x9c, 0x96, 0xf9, 0x33, 0x97, 0xdc, 0x28, 0x0e, 0xc2, 0xce, 0x92, 0xdb, 0x74, 0x63, 0x66, + 0xea, 0x14, 0x67, 0x27, 0xa8, 0x26, 0xc2, 0x19, 0x70, 0x9c, 0xf9, 0x14, 0x7a, 0x0d, 0x86, 0xe4, + 0x01, 0x79, 0x39, 0x8f, 0xfd, 0x79, 0xf7, 0xfc, 0x57, 0x93, 0xa9, 0x2a, 0x4e, 0xc8, 0x25, 0x43, + 0xf4, 0x71, 0x0b, 0xc6, 0xf8, 0xef, 0x4b, 0x9d, 0x3a, 0x4f, 0xd9, 0xab, 0xe4, 0x11, 0x05, 0x5f, + 0x4d, 0xd0, 0x9c, 0x45, 0xb7, 0x77, 0x27, 0xc7, 0x92, 0x6d, 0x38, 0xc5, 0xd7, 0xfe, 0xca, 0x08, + 0x74, 0x87, 0x14, 0x24, 0xe3, 0x07, 0xac, 0xe3, 0x8e, 0x1f, 0xa0, 0xbb, 0xb4, 0x48, 0x1f, 0xfd, + 0xe7, 0xb0, 0xcc, 0x04, 0x57, 0x7d, 0xac, 0xdb, 0xf1, 0x6b, 0x98, 0xf1, 0x40, 0x21, 0x0c, 0x6e, + 0x12, 0xc7, 0x8b, 0x37, 0xf3, 0x39, 0x81, 0xba, 0xc4, 0x68, 0xa5, 0xf3, 0xef, 0x78, 0x2b, 0x16, + 0x9c, 0xd0, 0x0e, 0x0c, 0x6d, 0xf2, 0xb9, 0x28, 0x36, 0x4e, 0xcb, 0x87, 0x1d, 0xdc, 0xc4, 0x04, + 0xd7, 0x33, 0x4f, 0x34, 0x60, 0xc9, 0x8e, 0xc5, 0xaa, 0x19, 0xd1, 0x34, 0x5c, 0x8a, 0xe4, 0x97, + 0x7a, 0xd8, 0x7f, 0x28, 0xcd, 0xfb, 0x60, 0x24, 0x24, 0xb5, 0xc0, 0xaf, 0xb9, 0x1e, 0xa9, 0xcf, + 0xc8, 0xd3, 0xa5, 0x83, 0x64, 0x9c, 0x31, 0xd7, 0x0c, 0x36, 0x68, 0xe0, 0x04, 0x45, 0xb6, 0xc8, + 0x54, 0x16, 0x3a, 0xfd, 0x20, 0x44, 0x9c, 0x22, 0x2c, 0xe5, 0x94, 0xf3, 0xce, 0x68, 0xf2, 0x45, + 0x96, 0x6c, 0xc3, 0x29, 0xbe, 0xe8, 0x25, 0x80, 0x60, 0x9d, 0x07, 0xa4, 0xcd, 0xc4, 
0xe2, 0x48, + 0xe1, 0x20, 0xaf, 0x3a, 0xc6, 0x33, 0x57, 0x25, 0x05, 0x6c, 0x50, 0x43, 0x57, 0x00, 0xf8, 0xb2, + 0x59, 0xeb, 0xb4, 0xe4, 0xee, 0x4a, 0xa6, 0x0c, 0x42, 0x55, 0x41, 0xee, 0xec, 0x4e, 0x76, 0x3b, + 0x70, 0x59, 0xd4, 0x8d, 0xf1, 0x38, 0xfa, 0x29, 0x18, 0x8a, 0xda, 0xcd, 0xa6, 0xa3, 0x0e, 0x1c, + 0x72, 0xcc, 0x85, 0xe5, 0x74, 0x0d, 0xa9, 0xc8, 0x1b, 0xb0, 0xe4, 0x88, 0x6e, 0x52, 0xf9, 0x2e, + 0xc4, 0x13, 0x5f, 0x45, 0xdc, 0x3c, 0xe1, 0x6e, 0xb5, 0xb7, 0xc9, 0xdd, 0x06, 0xce, 0xc0, 0xb9, + 0xb3, 0x3b, 0xf9, 0x40, 0xb2, 0x7d, 0x29, 0x10, 0xd9, 0xa9, 0x99, 0x34, 0xd1, 0x65, 0x59, 0x94, + 0x8a, 0xbe, 0xb6, 0xac, 0x95, 0xf2, 0xa4, 0x2e, 0x4a, 0xc5, 0x9a, 0x7b, 0x8f, 0x99, 0xf9, 0x30, + 0x5a, 0x86, 0x53, 0xb5, 0xc0, 0x8f, 0xc3, 0xc0, 0xf3, 0x78, 0x51, 0x36, 0xbe, 0xd1, 0xe5, 0x07, + 0x12, 0x0f, 0x8b, 0x6e, 0x9f, 0x9a, 0xeb, 0x46, 0xc1, 0x59, 0xcf, 0x51, 0x83, 0x3c, 0xad, 0x1c, + 0xc6, 0x72, 0x39, 0xab, 0x4e, 0xd0, 0x14, 0x12, 0x4a, 0xf9, 0x90, 0xf7, 0x51, 0x13, 0x7e, 0xf2, + 0xc4, 0x52, 0x7c, 0xb1, 0x67, 0x61, 0x84, 0xec, 0xc4, 0x24, 0xf4, 0x1d, 0xef, 0x1a, 0x5e, 0x92, + 0xde, 0x7f, 0xb6, 0x30, 0x2f, 0x18, 0xed, 0x38, 0x81, 0x85, 0x6c, 0xe5, 0x72, 0x32, 0xd2, 0xc0, + 0xb9, 0xcb, 0x49, 0x3a, 0x98, 0xec, 0xaf, 0x15, 0x13, 0x06, 0xeb, 0x3d, 0x39, 0x1f, 0x65, 0xf5, + 0x86, 0x64, 0x61, 0x26, 0x06, 0x10, 0x1b, 0xb1, 0x3c, 0x39, 0xab, 0x7a, 0x43, 0x2b, 0x26, 0x23, + 0x9c, 0xe4, 0x8b, 0xb6, 0xa0, 0xb4, 0x19, 0x44, 0xb1, 0xdc, 0x9e, 0x1d, 0x72, 0x27, 0x78, 0x29, + 0x88, 0x62, 0x66, 0x65, 0xa9, 0xd7, 0xa6, 0x2d, 0x11, 0xe6, 0x3c, 0xe8, 0x1e, 0x3d, 0xda, 0x74, + 0xc2, 0x7a, 0x34, 0xc7, 0x8a, 0x36, 0x0c, 0x30, 0xf3, 0x4a, 0x19, 0xd3, 0x55, 0x0d, 0xc2, 0x26, + 0x9e, 0xfd, 0x17, 0x56, 0xe2, 0x88, 0xe8, 0x06, 0x8b, 0xc0, 0xdf, 0x26, 0x3e, 0x15, 0x51, 0x66, + 0xcc, 0xdf, 0x8f, 0xa6, 0xf2, 0x99, 0xdf, 0xd2, 0xab, 0x7e, 0xe2, 0x2d, 0x4a, 0x61, 0x8a, 0x91, + 0x30, 0xc2, 0x03, 0x3f, 0x64, 0x25, 0x13, 0xd3, 0x0b, 0x79, 0xec, 0xdb, 0xcc, 0xe2, 0x0c, 0xfb, + 0xe6, 0xb8, 0xdb, 0x9f, 
0xb3, 0x60, 0x68, 0xd6, 0xa9, 0x6d, 0x05, 0x1b, 0x1b, 0xe8, 0x29, 0x28, + 0xd7, 0xdb, 0xa1, 0x99, 0x23, 0xaf, 0x3c, 0x3f, 0xf3, 0xa2, 0x1d, 0x2b, 0x0c, 0x3a, 0xf5, 0x37, + 0x9c, 0x9a, 0x2c, 0xd1, 0x50, 0xe4, 0x53, 0xff, 0x22, 0x6b, 0xc1, 0x02, 0x42, 0x87, 0xbf, 0xe9, + 0xec, 0xc8, 0x87, 0xd3, 0xe7, 0x53, 0xcb, 0x1a, 0x84, 0x4d, 0x3c, 0xfb, 0xdf, 0x58, 0x30, 0x31, + 0xeb, 0x44, 0x6e, 0x6d, 0xa6, 0x1d, 0x6f, 0xce, 0xba, 0xf1, 0x7a, 0xbb, 0xb6, 0x45, 0x62, 0x5e, + 0xca, 0x83, 0xf6, 0xb2, 0x1d, 0xd1, 0x15, 0xa8, 0xb6, 0xcb, 0xaa, 0x97, 0xd7, 0x44, 0x3b, 0x56, + 0x18, 0xe8, 0x35, 0x18, 0x6e, 0x39, 0x51, 0x74, 0x2b, 0x08, 0xeb, 0x98, 0x6c, 0xe4, 0x53, 0xec, + 0xa7, 0x4a, 0x6a, 0x21, 0x89, 0x31, 0xd9, 0x10, 0xd1, 0x1e, 0x9a, 0x3e, 0x36, 0x99, 0xd9, 0x3f, + 0x6f, 0xc1, 0xe9, 0x59, 0xe2, 0x84, 0x24, 0x64, 0xb5, 0x81, 0xd4, 0x8b, 0xa0, 0x57, 0xa1, 0x1c, + 0xd3, 0x16, 0xda, 0x23, 0x2b, 0xdf, 0x1e, 0xb1, 0x38, 0x8d, 0x35, 0x41, 0x1c, 0x2b, 0x36, 0xf6, + 0xa7, 0x2d, 0x78, 0x28, 0xab, 0x2f, 0x73, 0x5e, 0xd0, 0xae, 0xdf, 0x8b, 0x0e, 0xfd, 0x7d, 0x0b, + 0x46, 0xd8, 0xd9, 0xf7, 0x3c, 0x89, 0x1d, 0xd7, 0xeb, 0xaa, 0x4b, 0x68, 0xf5, 0x59, 0x97, 0xf0, + 0x1c, 0x0c, 0x6c, 0x06, 0x4d, 0x92, 0x8e, 0xdb, 0xb8, 0x14, 0x34, 0x09, 0x66, 0x10, 0xf4, 0x0c, + 0x9d, 0x84, 0xae, 0x1f, 0x3b, 0x74, 0x39, 0xca, 0xb3, 0x81, 0x71, 0x3e, 0x01, 0x55, 0x33, 0x36, + 0x71, 0xec, 0x7f, 0x5d, 0x81, 0x21, 0x11, 0x64, 0xd4, 0x77, 0x69, 0x19, 0xe9, 0xc2, 0x29, 0xf4, + 0x74, 0xe1, 0x44, 0x30, 0x58, 0x63, 0x05, 0x52, 0x85, 0x79, 0x7e, 0x25, 0x97, 0xa8, 0x34, 0x5e, + 0x73, 0x55, 0x77, 0x8b, 0xff, 0xc7, 0x82, 0x15, 0xfa, 0xac, 0x05, 0xe3, 0xb5, 0xc0, 0xf7, 0x49, + 0x4d, 0xdb, 0x8e, 0x03, 0x79, 0x04, 0x1f, 0xcd, 0x25, 0x89, 0xea, 0x63, 0xd5, 0x14, 0x00, 0xa7, + 0xd9, 0xa3, 0x17, 0x60, 0x94, 0x8f, 0xd9, 0xf5, 0xc4, 0x81, 0x86, 0x2e, 0x57, 0x67, 0x02, 0x71, + 0x12, 0x17, 0x4d, 0xf1, 0x83, 0x21, 0x51, 0x18, 0x6e, 0x50, 0xfb, 0x7d, 0x8d, 0x92, 0x70, 0x06, + 0x06, 0x0a, 0x01, 0x85, 0x64, 0x23, 0x24, 0xd1, 0xa6, 0x08, 
0xc2, 0x62, 0x76, 0xeb, 0xd0, 0xdd, + 0x15, 0x85, 0xc0, 0x5d, 0x94, 0x70, 0x06, 0x75, 0xb4, 0x25, 0x7c, 0x08, 0xe5, 0x3c, 0xe4, 0xb9, + 0xf8, 0xcc, 0x3d, 0x5d, 0x09, 0x93, 0x50, 0x62, 0xaa, 0x8b, 0xd9, 0xcb, 0x45, 0x9e, 0x88, 0xc8, + 0x14, 0x1b, 0xe6, 0xed, 0x68, 0x1e, 0x4e, 0xa4, 0x8a, 0xed, 0x45, 0xe2, 0xe0, 0x41, 0x25, 0x9d, + 0xa5, 0xca, 0xf4, 0x45, 0xb8, 0xeb, 0x09, 0xd3, 0xbf, 0x34, 0xbc, 0x8f, 0x7f, 0xa9, 0xa3, 0x42, + 0x7d, 0xf9, 0x91, 0xc0, 0x8b, 0xb9, 0x0c, 0x40, 0x5f, 0x71, 0xbd, 0x9f, 0x4a, 0xc5, 0xf5, 0x8e, + 0xb2, 0x0e, 0x5c, 0xcf, 0xa7, 0x03, 0x07, 0x0f, 0xe2, 0xbd, 0x97, 0x41, 0xb9, 0xff, 0xcb, 0x02, + 0xf9, 0x5d, 0xe7, 0x9c, 0xda, 0x26, 0xa1, 0x53, 0x06, 0xbd, 0x03, 0xc6, 0x94, 0x6b, 0x82, 0x9b, + 0x44, 0x16, 0x9b, 0x35, 0xca, 0x76, 0xc6, 0x09, 0x28, 0x4e, 0x61, 0xa3, 0x69, 0xa8, 0xd0, 0x71, + 0xe2, 0x8f, 0x72, 0xbd, 0xaf, 0xdc, 0x1f, 0x33, 0xab, 0x8b, 0xe2, 0x29, 0x8d, 0x83, 0x02, 0x38, + 0xe9, 0x39, 0x51, 0xcc, 0x7a, 0x50, 0xed, 0xf8, 0xb5, 0xbb, 0x2c, 0xc9, 0xc2, 0x32, 0x9b, 0x96, + 0xd2, 0x84, 0x70, 0x37, 0x6d, 0xfb, 0x3f, 0x96, 0x60, 0x34, 0x21, 0x19, 0x0f, 0x68, 0x30, 0x3c, + 0x05, 0x65, 0xa9, 0xc3, 0xd3, 0xb5, 0xa7, 0x94, 0xa2, 0x57, 0x18, 0x54, 0x69, 0xad, 0x6b, 0xad, + 0x9a, 0x36, 0x70, 0x0c, 0x85, 0x8b, 0x4d, 0x3c, 0x26, 0x94, 0x63, 0x2f, 0x9a, 0xf3, 0x5c, 0xe2, + 0xc7, 0xbc, 0x9b, 0xf9, 0x08, 0xe5, 0xb5, 0xa5, 0xaa, 0x49, 0x54, 0x0b, 0xe5, 0x14, 0x00, 0xa7, + 0xd9, 0xa3, 0x8f, 0x5a, 0x30, 0xea, 0xdc, 0x8a, 0x74, 0x15, 0x6f, 0x11, 0xc1, 0x7b, 0x48, 0x25, + 0x95, 0x28, 0x0c, 0xce, 0xbd, 0xfa, 0x89, 0x26, 0x9c, 0x64, 0x8a, 0x5e, 0xb7, 0x00, 0x91, 0x1d, + 0x52, 0x93, 0x31, 0xc6, 0xa2, 0x2f, 0x83, 0x79, 0xec, 0xe0, 0x2f, 0x74, 0xd1, 0xe5, 0x52, 0xbd, + 0xbb, 0x1d, 0x67, 0xf4, 0x01, 0x5d, 0x06, 0x54, 0x77, 0x23, 0x67, 0xdd, 0x23, 0x73, 0x41, 0x53, + 0x66, 0xe3, 0x8a, 0xc3, 0xe9, 0xb3, 0x62, 0x9c, 0xd1, 0x7c, 0x17, 0x06, 0xce, 0x78, 0x8a, 0xcd, + 0xb2, 0x30, 0xd8, 0xe9, 0x5c, 0x0b, 0x3d, 0xa6, 0x25, 0xcc, 0x59, 0x26, 0xda, 0xb1, 0xc2, 0xb0, + 
0xff, 0xb2, 0xa8, 0x96, 0xb2, 0x0e, 0xa8, 0x77, 0x8c, 0xc0, 0x5e, 0xeb, 0xee, 0x03, 0x7b, 0x75, + 0xd8, 0x51, 0x77, 0x8e, 0x79, 0x22, 0x25, 0xb5, 0x70, 0x8f, 0x52, 0x52, 0x7f, 0xc6, 0x4a, 0xd4, + 0x77, 0x1b, 0x3e, 0xff, 0x52, 0xbe, 0xc1, 0xfc, 0x53, 0x3c, 0x24, 0x2a, 0xa5, 0x57, 0x52, 0x91, + 0x70, 0x4f, 0x41, 0x79, 0xc3, 0x73, 0x58, 0x55, 0x12, 0xb6, 0x50, 0x8d, 0x70, 0xad, 0x8b, 0xa2, + 0x1d, 0x2b, 0x0c, 0x2a, 0xf5, 0x0d, 0xa2, 0x07, 0x92, 0xda, 0xff, 0xb9, 0x08, 0xc3, 0x86, 0xc6, + 0xcf, 0x34, 0xdf, 0xac, 0xfb, 0xcc, 0x7c, 0x2b, 0x1c, 0xc0, 0x7c, 0xfb, 0x69, 0xa8, 0xd4, 0xa4, + 0x36, 0xca, 0xa7, 0x5e, 0x7d, 0x5a, 0xc7, 0x69, 0x85, 0xa4, 0x9a, 0xb0, 0xe6, 0x89, 0x16, 0x12, + 0x69, 0x8f, 0x09, 0xbf, 0x40, 0x56, 0x5e, 0xa2, 0xd0, 0x68, 0xdd, 0xcf, 0xa4, 0xcf, 0xf1, 0x4b, + 0xfb, 0x9f, 0xe3, 0xdb, 0xdf, 0xb2, 0xd4, 0xc7, 0x3d, 0x86, 0xfa, 0x36, 0x37, 0x93, 0xf5, 0x6d, + 0x2e, 0xe4, 0x32, 0xcc, 0x3d, 0x0a, 0xdb, 0x5c, 0x85, 0xa1, 0xb9, 0xa0, 0xd9, 0x74, 0xfc, 0x3a, + 0xfa, 0x41, 0x18, 0xaa, 0xf1, 0x9f, 0xc2, 0x87, 0xc6, 0x4e, 0xaa, 0x05, 0x14, 0x4b, 0x18, 0x7a, + 0x04, 0x06, 0x9c, 0xb0, 0x21, 0xfd, 0x66, 0x2c, 0xa2, 0x6c, 0x26, 0x6c, 0x44, 0x98, 0xb5, 0xda, + 0xff, 0x62, 0x00, 0x58, 0x20, 0x87, 0x13, 0x92, 0xfa, 0x5a, 0xc0, 0xca, 0xcc, 0x1e, 0xe9, 0xf9, + 0xae, 0xde, 0xd4, 0xdd, 0xcf, 0x67, 0xbc, 0xc6, 0x39, 0x5f, 0xf1, 0xb8, 0xcf, 0xf9, 0xb2, 0x8f, + 0x6e, 0x07, 0xee, 0xa3, 0xa3, 0x5b, 0xfb, 0x93, 0x16, 0x20, 0x15, 0xfd, 0xa3, 0x63, 0x2b, 0xa6, + 0xa1, 0xa2, 0xe2, 0x80, 0x84, 0x01, 0xa8, 0x45, 0x84, 0x04, 0x60, 0x8d, 0xd3, 0xc7, 0x4e, 0xfe, + 0x71, 0x29, 0xbf, 0x8b, 0xc9, 0x60, 0x7e, 0x26, 0xf5, 0x85, 0x38, 0xb7, 0x7f, 0xb7, 0x00, 0x0f, + 0x70, 0xd3, 0x61, 0xd9, 0xf1, 0x9d, 0x06, 0x69, 0xd2, 0x5e, 0xf5, 0x1b, 0x2d, 0x53, 0xa3, 0x5b, + 0x48, 0x57, 0x86, 0xde, 0x1f, 0x76, 0xed, 0xf2, 0x35, 0xc7, 0x57, 0xd9, 0xa2, 0xef, 0xc6, 0x98, + 0x11, 0x47, 0x11, 0x94, 0xe5, 0x65, 0x2e, 0x42, 0x16, 0xe7, 0xc4, 0x48, 0x89, 0x25, 0xa1, 0x65, + 0x09, 0x56, 0x8c, 0xa8, 0x2a, 0xf5, 
0x82, 0xda, 0x16, 0x26, 0xad, 0x20, 0xad, 0x4a, 0x97, 0x44, + 0x3b, 0x56, 0x18, 0x76, 0x13, 0xc6, 0xe5, 0x18, 0xb6, 0xae, 0x90, 0x0e, 0x26, 0x1b, 0x54, 0xff, + 0xd4, 0x64, 0x93, 0x71, 0xbf, 0x8c, 0xd2, 0x3f, 0x73, 0x26, 0x10, 0x27, 0x71, 0x65, 0xe5, 0xd9, + 0x42, 0x76, 0xe5, 0x59, 0xfb, 0x77, 0x2d, 0x48, 0x2b, 0x40, 0xa3, 0xce, 0xa6, 0xb5, 0x67, 0x9d, + 0xcd, 0x03, 0x54, 0xaa, 0xfc, 0x49, 0x18, 0x76, 0x62, 0x6a, 0xe1, 0x70, 0x6f, 0x44, 0xf1, 0xee, + 0x4e, 0xd1, 0x96, 0x83, 0xba, 0xbb, 0xe1, 0x32, 0x2f, 0x84, 0x49, 0xce, 0x7e, 0xdd, 0x82, 0xca, + 0x7c, 0xd8, 0x39, 0x78, 0x0e, 0x54, 0x77, 0x86, 0x53, 0xe1, 0x40, 0x19, 0x4e, 0x32, 0x87, 0xaa, + 0xd8, 0x2b, 0x87, 0xca, 0xfe, 0xeb, 0x01, 0x38, 0xd9, 0x95, 0xd4, 0x87, 0x9e, 0x87, 0x11, 0xf5, + 0x95, 0xa4, 0x0b, 0xb2, 0x62, 0x46, 0xc5, 0x6a, 0x18, 0x4e, 0x60, 0xf6, 0xb1, 0x54, 0x17, 0xe1, + 0x54, 0x48, 0x5e, 0x6d, 0x93, 0x36, 0x99, 0xd9, 0x88, 0x49, 0x58, 0x25, 0xb5, 0xc0, 0xaf, 0xf3, + 0x42, 0xb5, 0xc5, 0xd9, 0x07, 0x6f, 0xef, 0x4e, 0x9e, 0xc2, 0xdd, 0x60, 0x9c, 0xf5, 0x0c, 0x6a, + 0xc1, 0xa8, 0x67, 0xda, 0xce, 0x62, 0xcb, 0x76, 0x57, 0x66, 0xb7, 0x9a, 0xad, 0x89, 0x66, 0x9c, + 0x64, 0x90, 0x34, 0xc0, 0x4b, 0xf7, 0xc8, 0x00, 0xff, 0x88, 0x36, 0xc0, 0x79, 0x50, 0xcc, 0x7b, + 0x72, 0x4e, 0xea, 0xec, 0xc7, 0x02, 0x3f, 0x8c, 0x4d, 0xfd, 0x22, 0x94, 0x65, 0xc0, 0x60, 0x5f, + 0x81, 0x76, 0x26, 0x9d, 0x1e, 0xb2, 0xfd, 0x09, 0x78, 0xf3, 0x85, 0x30, 0x34, 0x06, 0xf3, 0x6a, + 0x10, 0xcf, 0x78, 0x5e, 0x70, 0x8b, 0x9a, 0x2b, 0xd7, 0x22, 0x22, 0x7c, 0x62, 0xf6, 0x9d, 0x02, + 0x64, 0x6c, 0x2f, 0xe9, 0x9a, 0xd4, 0x36, 0x52, 0x62, 0x4d, 0x1e, 0xcc, 0x4e, 0x42, 0x3b, 0x3c, + 0xa8, 0x92, 0x5b, 0x03, 0xef, 0xce, 0x7b, 0x7b, 0xac, 0xe3, 0x2c, 0x95, 0xa4, 0x54, 0xb1, 0x96, + 0xe7, 0x01, 0xb4, 0x69, 0x2b, 0xf2, 0x88, 0x54, 0xa0, 0x84, 0xb6, 0x80, 0xb1, 0x81, 0x85, 0x9e, + 0x83, 0x61, 0xd7, 0x8f, 0x62, 0xc7, 0xf3, 0x2e, 0xb9, 0x7e, 0x2c, 0xdc, 0xbe, 0xca, 0xec, 0x59, + 0xd4, 0x20, 0x6c, 0xe2, 0x9d, 0x7d, 0x9b, 0xf1, 0xfd, 0x0e, 0xf2, 0xdd, 
0x37, 0xe1, 0xa1, 0x05, + 0x37, 0x56, 0xd9, 0x6f, 0x6a, 0xbe, 0x51, 0xcb, 0x55, 0xc9, 0x2a, 0xab, 0x67, 0xbe, 0xa7, 0x91, + 0x7d, 0x56, 0x48, 0x26, 0xcb, 0xa5, 0xb3, 0xcf, 0xec, 0xe7, 0xe1, 0xf4, 0x82, 0x1b, 0x5f, 0x74, + 0x3d, 0x72, 0x40, 0x26, 0xf6, 0xef, 0x0c, 0xc2, 0x88, 0x99, 0xe9, 0x7d, 0x10, 0x71, 0xfd, 0x69, + 0x6a, 0x9c, 0x8a, 0xb7, 0x73, 0xd5, 0x89, 0xee, 0x8d, 0x43, 0xa7, 0x9d, 0x67, 0x8f, 0x98, 0x61, + 0x9f, 0x6a, 0x9e, 0xd8, 0xec, 0x00, 0xba, 0x05, 0xa5, 0x0d, 0x96, 0x1d, 0x55, 0xcc, 0x23, 0x16, + 0x27, 0x6b, 0x44, 0xf5, 0x72, 0xe4, 0xf9, 0x55, 0x9c, 0x1f, 0xb5, 0x29, 0xc2, 0x64, 0x52, 0xae, + 0x11, 0xb3, 0x2e, 0x94, 0x95, 0xc2, 0xe8, 0xa5, 0x12, 0x4a, 0x77, 0xa1, 0x12, 0x12, 0x02, 0x7a, + 0xf0, 0x1e, 0x09, 0x68, 0x96, 0xe9, 0x16, 0x6f, 0x32, 0x8b, 0x57, 0x24, 0xd9, 0x0c, 0xb1, 0x41, + 0x30, 0x32, 0xdd, 0x12, 0x60, 0x9c, 0xc6, 0x47, 0x1f, 0x54, 0x22, 0xbe, 0x9c, 0x87, 0xc7, 0xdc, + 0x9c, 0xd1, 0x47, 0x2d, 0xdd, 0x3f, 0x59, 0x80, 0xb1, 0x05, 0xbf, 0xbd, 0xba, 0xb0, 0xda, 0x5e, + 0xf7, 0xdc, 0xda, 0x15, 0xd2, 0xa1, 0x22, 0x7c, 0x8b, 0x74, 0x16, 0xe7, 0xc5, 0x0a, 0x52, 0x73, + 0xe6, 0x0a, 0x6d, 0xc4, 0x1c, 0x46, 0x85, 0xd1, 0x86, 0xeb, 0x37, 0x48, 0xd8, 0x0a, 0x5d, 0xe1, + 0xcc, 0x36, 0x84, 0xd1, 0x45, 0x0d, 0xc2, 0x26, 0x1e, 0xa5, 0x1d, 0xdc, 0xf2, 0x49, 0x98, 0x36, + 0xfd, 0x57, 0x68, 0x23, 0xe6, 0x30, 0x8a, 0x14, 0x87, 0x6d, 0xe1, 0x2b, 0x32, 0x90, 0xd6, 0x68, + 0x23, 0xe6, 0x30, 0xba, 0xd2, 0xa3, 0xf6, 0x3a, 0x0b, 0x75, 0x4a, 0x65, 0xf4, 0x54, 0x79, 0x33, + 0x96, 0x70, 0x8a, 0xba, 0x45, 0x3a, 0xf3, 0x4e, 0xec, 0xa4, 0xd3, 0x1e, 0xaf, 0xf0, 0x66, 0x2c, + 0xe1, 0xac, 0x94, 0x6e, 0x72, 0x38, 0xbe, 0xe7, 0x4a, 0xe9, 0x26, 0xbb, 0xdf, 0xc3, 0xe3, 0xf0, + 0xf7, 0x0a, 0x30, 0x62, 0x06, 0x28, 0xa2, 0x46, 0xca, 0x4c, 0x5f, 0xe9, 0xaa, 0xc4, 0xfe, 0xf6, + 0xac, 0x5b, 0x4a, 0x1b, 0x6e, 0x1c, 0xb4, 0xa2, 0xa7, 0x89, 0xdf, 0x70, 0x7d, 0xc2, 0x62, 0x35, + 0x78, 0x60, 0x63, 0x22, 0xfa, 0x71, 0x2e, 0xa8, 0x93, 0xbb, 0xb1, 0xf3, 0xef, 0xc5, 0x4d, 0x2e, + 0x37, 0xe0, 
0x64, 0x57, 0x7e, 0x6d, 0x1f, 0x66, 0xcf, 0xbe, 0xf5, 0x0f, 0x6c, 0x0c, 0xc3, 0x94, + 0xb0, 0x2c, 0x21, 0x37, 0x07, 0x27, 0xf9, 0xe2, 0xa5, 0x9c, 0x58, 0xba, 0xa4, 0xca, 0x99, 0x66, + 0xa7, 0x35, 0xd7, 0xd3, 0x40, 0xdc, 0x8d, 0x6f, 0x7f, 0xca, 0x82, 0xd1, 0x44, 0xca, 0x73, 0x4e, + 0x06, 0x1a, 0x5b, 0xdd, 0x01, 0x8b, 0xd1, 0x65, 0x39, 0x13, 0x45, 0xa6, 0xc0, 0xf5, 0xea, 0xd6, + 0x20, 0x6c, 0xe2, 0xd9, 0x9f, 0x2b, 0x40, 0x59, 0x86, 0x14, 0xf5, 0xd1, 0x95, 0x4f, 0x58, 0x30, + 0xaa, 0x4e, 0xc8, 0x98, 0x4b, 0xb3, 0x90, 0x47, 0x06, 0x16, 0xed, 0x81, 0x72, 0x8a, 0xf8, 0x1b, + 0x81, 0xde, 0x2d, 0x60, 0x93, 0x19, 0x4e, 0xf2, 0x46, 0xd7, 0x01, 0xa2, 0x4e, 0x14, 0x93, 0xa6, + 0xe1, 0x5c, 0xb5, 0x8d, 0x59, 0x36, 0x55, 0x0b, 0x42, 0x42, 0xe7, 0xd4, 0xd5, 0xa0, 0x4e, 0xaa, + 0x0a, 0x53, 0x9b, 0x6d, 0xba, 0x0d, 0x1b, 0x94, 0xec, 0x5f, 0x2b, 0xc0, 0x89, 0x74, 0x97, 0xd0, + 0x7b, 0x60, 0x44, 0x72, 0x37, 0x36, 0xe1, 0x32, 0x20, 0x6a, 0x04, 0x1b, 0xb0, 0x3b, 0xbb, 0x93, + 0x93, 0xdd, 0xb7, 0xec, 0x4e, 0x99, 0x28, 0x38, 0x41, 0x8c, 0x1f, 0x53, 0x8a, 0xf3, 0xf4, 0xd9, + 0xce, 0x4c, 0xab, 0x25, 0xce, 0x1a, 0x8d, 0x63, 0x4a, 0x13, 0x8a, 0x53, 0xd8, 0x68, 0x15, 0x4e, + 0x1b, 0x2d, 0x57, 0x89, 0xdb, 0xd8, 0x5c, 0x0f, 0x42, 0xb9, 0xeb, 0x7b, 0x44, 0x87, 0x5f, 0x76, + 0xe3, 0xe0, 0xcc, 0x27, 0xa9, 0x85, 0x51, 0x73, 0x5a, 0x4e, 0xcd, 0x8d, 0x3b, 0xc2, 0x5b, 0xac, + 0xe4, 0xe1, 0x9c, 0x68, 0xc7, 0x0a, 0xc3, 0xfe, 0xd5, 0x01, 0x38, 0xc1, 0xe3, 0x0d, 0x89, 0x0a, + 0xa7, 0x45, 0xef, 0x81, 0x4a, 0x14, 0x3b, 0x21, 0xdf, 0xf2, 0x5b, 0x07, 0x96, 0x01, 0x3a, 0xe1, + 0x59, 0x12, 0xc1, 0x9a, 0x1e, 0x7a, 0x89, 0x55, 0x8b, 0x72, 0xa3, 0x4d, 0x46, 0xbd, 0x70, 0x77, + 0x0e, 0x85, 0x8b, 0x8a, 0x02, 0x36, 0xa8, 0xa1, 0x1f, 0x87, 0x52, 0x6b, 0xd3, 0x89, 0xa4, 0xb7, + 0xeb, 0x09, 0xb9, 0xe0, 0x56, 0x69, 0xe3, 0x9d, 0xdd, 0xc9, 0x33, 0xe9, 0x57, 0x65, 0x00, 0xcc, + 0x1f, 0x32, 0xc5, 0xe5, 0xc0, 0xfe, 0x37, 0x9a, 0xd4, 0xc3, 0x4e, 0xf5, 0xd2, 0x4c, 0xfa, 0x0e, + 0x8c, 0x79, 0xd6, 0x8a, 0x05, 0x94, 0x2e, 0xee, 
0x4d, 0xce, 0xb2, 0x4e, 0x91, 0x07, 0x93, 0xaa, + 0xfb, 0x92, 0x06, 0x61, 0x13, 0x0f, 0x7d, 0xb2, 0x3b, 0x1a, 0x75, 0xe8, 0x08, 0x52, 0x15, 0xfa, + 0x8d, 0x43, 0xbd, 0x00, 0x15, 0xd1, 0xd5, 0xb5, 0x00, 0x3d, 0x0f, 0x23, 0xdc, 0x99, 0x32, 0x1b, + 0x3a, 0x7e, 0x6d, 0x33, 0xed, 0x02, 0x59, 0x33, 0x60, 0x38, 0x81, 0x69, 0x2f, 0xc3, 0x40, 0x9f, + 0xd2, 0xaa, 0xaf, 0x9d, 0xed, 0x8b, 0x50, 0xa6, 0xe4, 0xe4, 0xf6, 0x25, 0x0f, 0x92, 0x01, 0x94, + 0xe5, 0xfd, 0x78, 0xc8, 0x86, 0xa2, 0xeb, 0xc8, 0xa8, 0x03, 0xb5, 0x84, 0x16, 0xa3, 0xa8, 0xcd, + 0xa6, 0x1d, 0x05, 0xa2, 0xc7, 0xa1, 0x48, 0x76, 0x5a, 0xe9, 0xf0, 0x82, 0x0b, 0x3b, 0x2d, 0x37, + 0x24, 0x11, 0x45, 0x22, 0x3b, 0x2d, 0x74, 0x16, 0x0a, 0x6e, 0x5d, 0xcc, 0x48, 0x10, 0x38, 0x85, + 0xc5, 0x79, 0x5c, 0x70, 0xeb, 0xf6, 0x0e, 0x54, 0xd4, 0x85, 0x7c, 0x68, 0x4b, 0xda, 0x26, 0x56, + 0x1e, 0xf1, 0xa6, 0x92, 0x6e, 0x0f, 0xab, 0xa4, 0x0d, 0xa0, 0x33, 0xe9, 0xf3, 0xd2, 0x65, 0xe7, + 0x60, 0xa0, 0x16, 0x88, 0x1a, 0x28, 0x65, 0x4d, 0x86, 0x19, 0x25, 0x0c, 0x62, 0xdf, 0x80, 0xb1, + 0x2b, 0x7e, 0x70, 0x8b, 0xdd, 0x9b, 0xc3, 0xca, 0xc4, 0x52, 0xc2, 0x1b, 0xf4, 0x47, 0xda, 0x04, + 0x66, 0x50, 0xcc, 0x61, 0xaa, 0x80, 0x65, 0xa1, 0x57, 0x01, 0x4b, 0xfb, 0x43, 0x16, 0x8c, 0xa8, + 0x94, 0xdc, 0x85, 0xed, 0x2d, 0x4a, 0xb7, 0x11, 0x06, 0xed, 0x56, 0x9a, 0x2e, 0xbb, 0xfb, 0x13, + 0x73, 0x98, 0x99, 0xab, 0x5e, 0xd8, 0x27, 0x57, 0xfd, 0x1c, 0x0c, 0x6c, 0xb9, 0x7e, 0x3d, 0xed, + 0x32, 0xbc, 0xe2, 0xfa, 0x75, 0xcc, 0x20, 0xb4, 0x0b, 0x27, 0x54, 0x17, 0xa4, 0xf1, 0xf1, 0x3c, + 0x8c, 0xac, 0xb7, 0x5d, 0xaf, 0x2e, 0xeb, 0xdf, 0xa6, 0x96, 0xcb, 0xac, 0x01, 0xc3, 0x09, 0x4c, + 0x74, 0x1e, 0x60, 0xdd, 0xf5, 0x9d, 0xb0, 0xb3, 0xaa, 0xad, 0x1d, 0xa5, 0x00, 0x67, 0x15, 0x04, + 0x1b, 0x58, 0xf6, 0x67, 0x8a, 0x30, 0x96, 0x4c, 0x4c, 0xee, 0xc3, 0x7d, 0xf0, 0x38, 0x94, 0x58, + 0xae, 0x72, 0xfa, 0xd3, 0xf2, 0x92, 0xb1, 0x1c, 0x86, 0x22, 0x18, 0xe4, 0x8b, 0x39, 0x9f, 0xfb, + 0x13, 0x55, 0x27, 0x95, 0x9f, 0x91, 0x45, 0xe5, 0x0a, 0xb7, 0xad, 0x60, 0x85, 0x3e, 
0x6a, 0xc1, + 0x50, 0xd0, 0x32, 0x0b, 0x1f, 0xbe, 0x3b, 0xcf, 0xa4, 0x6d, 0x91, 0xc9, 0x29, 0x76, 0x7c, 0xea, + 0xd3, 0xcb, 0xcf, 0x21, 0x59, 0x9f, 0xfd, 0x31, 0x18, 0x31, 0x31, 0xf7, 0xdb, 0xf4, 0x95, 0xcd, + 0x4d, 0xdf, 0x27, 0xcc, 0x49, 0x21, 0xd2, 0xd2, 0xfb, 0x58, 0x6e, 0xd7, 0xa0, 0x54, 0x53, 0xa1, + 0x4b, 0x77, 0x55, 0x35, 0x5d, 0x95, 0x6d, 0x62, 0xc7, 0xc2, 0x9c, 0x9a, 0xfd, 0x2d, 0xcb, 0x98, + 0x1f, 0x98, 0x44, 0x8b, 0x75, 0x14, 0x42, 0xb1, 0xb1, 0xbd, 0x25, 0xd4, 0xfc, 0xe5, 0x9c, 0x86, + 0x77, 0x61, 0x7b, 0x4b, 0xcf, 0x71, 0xb3, 0x15, 0x53, 0x66, 0x7d, 0x38, 0xc3, 0x13, 0xd5, 0x0b, + 0x8a, 0xfb, 0x57, 0x2f, 0xb0, 0x5f, 0x2f, 0xc0, 0xc9, 0xae, 0x49, 0x85, 0x5e, 0x83, 0x52, 0x48, + 0xdf, 0x52, 0xbc, 0xde, 0x52, 0x6e, 0xf5, 0x06, 0xa2, 0xc5, 0xba, 0x56, 0x9f, 0xc9, 0x76, 0xcc, + 0x59, 0xa2, 0xcb, 0x80, 0x74, 0x80, 0x9d, 0xf2, 0xc4, 0xf3, 0x57, 0x56, 0x51, 0x38, 0x33, 0x5d, + 0x18, 0x38, 0xe3, 0x29, 0xf4, 0x42, 0xda, 0xa1, 0x5f, 0x4c, 0x9e, 0x24, 0xed, 0xe5, 0x9b, 0xb7, + 0x7f, 0xab, 0x00, 0xa3, 0x89, 0x3a, 0x94, 0xc8, 0x83, 0x32, 0xf1, 0xd8, 0x31, 0x9f, 0x54, 0x36, + 0x87, 0xbd, 0x55, 0x42, 0x29, 0xc8, 0x0b, 0x82, 0x2e, 0x56, 0x1c, 0xee, 0x8f, 0xe0, 0x9c, 0xe7, + 0x61, 0x44, 0x76, 0xe8, 0xdd, 0x4e, 0xd3, 0x13, 0x03, 0xa8, 0xe6, 0xe8, 0x05, 0x03, 0x86, 0x13, + 0x98, 0xf6, 0xef, 0x15, 0x61, 0x82, 0x9f, 0x8b, 0xd6, 0xd5, 0xcc, 0x5b, 0x96, 0xfe, 0x84, 0x5f, + 0xd0, 0xd5, 0x62, 0xad, 0x3c, 0xae, 0x4e, 0xee, 0xc5, 0xa8, 0xaf, 0x98, 0xd2, 0x2f, 0xa6, 0x62, + 0x4a, 0xf9, 0x16, 0xaf, 0x71, 0x44, 0x3d, 0xfa, 0xde, 0x0a, 0x32, 0xfd, 0x27, 0x05, 0x18, 0x4f, + 0xdd, 0x90, 0x85, 0x3e, 0x93, 0xbc, 0x54, 0xc1, 0xca, 0xe3, 0xcc, 0x68, 0xcf, 0x4b, 0x93, 0x0e, + 0x76, 0xb5, 0xc2, 0x3d, 0x5a, 0x2a, 0xf6, 0x37, 0x0b, 0x30, 0x96, 0xbc, 0xda, 0xeb, 0x3e, 0x1c, + 0xa9, 0xb7, 0x42, 0x85, 0xdd, 0x5e, 0xc3, 0x6e, 0xa4, 0xe7, 0x47, 0x4e, 0xfc, 0xa2, 0x10, 0xd9, + 0x88, 0x35, 0xfc, 0xbe, 0xb8, 0xb1, 0xc2, 0xfe, 0x67, 0x16, 0x9c, 0xe1, 0x6f, 0x99, 0x9e, 0x87, + 0x7f, 0x27, 0x6b, 0x74, 
0x5f, 0xce, 0xb7, 0x83, 0xa9, 0x2a, 0xc7, 0xfb, 0x8d, 0x2f, 0xbb, 0x40, + 0x5a, 0xf4, 0x36, 0x39, 0x15, 0xee, 0xc3, 0xce, 0x1e, 0x68, 0x32, 0xd8, 0xdf, 0x2c, 0x82, 0xbe, + 0x33, 0x1b, 0xb9, 0x22, 0xeb, 0x3d, 0x97, 0x6a, 0xcf, 0xd5, 0x8e, 0x5f, 0xd3, 0xb7, 0x73, 0x97, + 0x53, 0x49, 0xef, 0x3f, 0x67, 0xc1, 0xb0, 0xeb, 0xbb, 0xb1, 0xeb, 0x30, 0x97, 0x4d, 0x3e, 0x17, + 0xdf, 0x2a, 0x76, 0x8b, 0x9c, 0x72, 0x10, 0x9a, 0xe7, 0x94, 0x8a, 0x19, 0x36, 0x39, 0xa3, 0xf7, + 0x89, 0xb4, 0x8f, 0x62, 0x6e, 0xa5, 0x23, 0xca, 0xa9, 0x5c, 0x8f, 0x16, 0x35, 0xbc, 0xe2, 0x30, + 0xa7, 0x8a, 0x2b, 0x98, 0x92, 0x52, 0x17, 0x07, 0x28, 0xd3, 0x96, 0x35, 0x63, 0xce, 0xc8, 0x8e, + 0x00, 0x75, 0x8f, 0xc5, 0x01, 0x43, 0xea, 0xa7, 0xa1, 0xe2, 0xb4, 0xe3, 0xa0, 0x49, 0x87, 0x49, + 0x1c, 0xa5, 0xea, 0xa4, 0x01, 0x09, 0xc0, 0x1a, 0xc7, 0xfe, 0x4c, 0x09, 0x52, 0x69, 0xe8, 0x68, + 0xc7, 0xbc, 0xef, 0xdd, 0xca, 0xf7, 0xbe, 0x77, 0xd5, 0x99, 0xac, 0x3b, 0xdf, 0x51, 0x43, 0x7a, + 0xbf, 0xb8, 0x8d, 0xf9, 0x62, 0xda, 0xfb, 0xf5, 0x13, 0xfd, 0x9d, 0x2a, 0xd0, 0xb9, 0x3a, 0xcd, + 0xab, 0x78, 0x4d, 0xed, 0xeb, 0x28, 0xdb, 0xef, 0xea, 0xdf, 0x0f, 0x8b, 0x6b, 0x7a, 0x30, 0x89, + 0xda, 0x5e, 0x2c, 0x66, 0xc3, 0x8b, 0x39, 0xae, 0x32, 0x4e, 0x58, 0xd7, 0x72, 0xe1, 0xff, 0xb1, + 0xc1, 0x34, 0xe9, 0xce, 0x1c, 0x3c, 0x52, 0x77, 0xe6, 0x50, 0xae, 0xee, 0xcc, 0xf3, 0x00, 0x6c, + 0x6e, 0xf3, 0xd0, 0xdf, 0x32, 0xf3, 0x32, 0x29, 0x51, 0x88, 0x15, 0x04, 0x1b, 0x58, 0xf6, 0x0f, + 0x43, 0xb2, 0x18, 0x11, 0x9a, 0x94, 0xb5, 0x8f, 0xf8, 0x89, 0x07, 0xcb, 0xba, 0x4a, 0x94, 0x29, + 0xfa, 0x0d, 0x0b, 0xcc, 0x8a, 0x49, 0xe8, 0x55, 0x5e, 0x9a, 0xc9, 0xca, 0xe3, 0x64, 0xdc, 0xa0, + 0x3b, 0xb5, 0xec, 0xb4, 0x52, 0x21, 0x1a, 0xb2, 0x3e, 0xd3, 0xd9, 0xb7, 0x41, 0x59, 0x42, 0x0f, + 0x64, 0xd4, 0x7d, 0x10, 0x4e, 0xc9, 0x0c, 0x6e, 0xe9, 0xa3, 0x17, 0xa7, 0xaa, 0xfb, 0xbb, 0x7e, + 0xa4, 0x3f, 0xa7, 0xd0, 0xcb, 0x9f, 0xd3, 0xc7, 0xad, 0xff, 0xbf, 0x69, 0xc1, 0xb9, 0x74, 0x07, + 0xa2, 0xe5, 0xc0, 0x77, 0xe3, 0x20, 0xac, 0x92, 0x38, 0x76, 
0xfd, 0x06, 0xab, 0x48, 0x79, 0xcb, + 0x09, 0xe5, 0xad, 0x24, 0x4c, 0x50, 0xde, 0x70, 0x42, 0x1f, 0xb3, 0x56, 0xd4, 0x81, 0x41, 0x1e, + 0x1f, 0x2a, 0xac, 0xf5, 0x43, 0xae, 0x8d, 0x8c, 0xe1, 0xd0, 0xdb, 0x05, 0x1e, 0x9b, 0x8a, 0x05, + 0x43, 0xfb, 0x3b, 0x16, 0xa0, 0x95, 0x6d, 0x12, 0x86, 0x6e, 0xdd, 0x88, 0x68, 0x65, 0xd7, 0xdd, + 0x19, 0xd7, 0xda, 0x99, 0xf5, 0x05, 0x52, 0xd7, 0xdd, 0x19, 0xff, 0xb2, 0xaf, 0xbb, 0x2b, 0x1c, + 0xec, 0xba, 0x3b, 0xb4, 0x02, 0x67, 0x9a, 0x7c, 0xbb, 0xc1, 0xaf, 0x90, 0xe2, 0x7b, 0x0f, 0x95, + 0x0a, 0xfb, 0xd0, 0xed, 0xdd, 0xc9, 0x33, 0xcb, 0x59, 0x08, 0x38, 0xfb, 0x39, 0xfb, 0x6d, 0x80, + 0x78, 0x20, 0xeb, 0x5c, 0x56, 0x2c, 0x5e, 0x4f, 0xf7, 0x8b, 0xfd, 0x85, 0x12, 0x8c, 0xa7, 0x6a, + 0xd6, 0xd3, 0xad, 0x5e, 0x77, 0xf0, 0xdf, 0xa1, 0xf5, 0x77, 0x77, 0xf7, 0xfa, 0x0a, 0x27, 0xf4, + 0xa1, 0xe4, 0xfa, 0xad, 0x76, 0x9c, 0x4f, 0x26, 0x3e, 0xef, 0xc4, 0x22, 0x25, 0x68, 0xb8, 0x8b, + 0xe9, 0x5f, 0xcc, 0xd9, 0xe4, 0x19, 0x9c, 0x98, 0x30, 0xc6, 0x07, 0xee, 0x91, 0x3b, 0xe0, 0xc3, + 0x3a, 0x54, 0xb0, 0x94, 0x87, 0x63, 0x31, 0x35, 0x59, 0x8e, 0x3a, 0x94, 0xe4, 0x6b, 0x05, 0x18, + 0x36, 0x3e, 0x1a, 0xfa, 0x95, 0x64, 0x3d, 0x41, 0x2b, 0xbf, 0x57, 0x62, 0xf4, 0xa7, 0x74, 0xc5, + 0x40, 0xfe, 0x4a, 0x4f, 0x74, 0x97, 0x12, 0xbc, 0xb3, 0x3b, 0x79, 0x22, 0x55, 0x2c, 0x30, 0x51, + 0x5e, 0xf0, 0xec, 0x07, 0x60, 0x3c, 0x45, 0x26, 0xe3, 0x95, 0xd7, 0xcc, 0x57, 0x3e, 0xb4, 0x5b, + 0xca, 0x1c, 0xb2, 0xaf, 0xd2, 0x21, 0x13, 0x09, 0xc0, 0x81, 0x47, 0xfa, 0xf0, 0xc1, 0xa6, 0xf2, + 0xfc, 0x0b, 0x7d, 0xe6, 0xf9, 0x3f, 0x09, 0xe5, 0x56, 0xe0, 0xb9, 0x35, 0x57, 0x95, 0xf7, 0x65, + 0x95, 0x05, 0x56, 0x45, 0x1b, 0x56, 0x50, 0x74, 0x0b, 0x2a, 0x37, 0x6f, 0xc5, 0xfc, 0xf4, 0x47, + 0xf8, 0xb7, 0xf3, 0x3a, 0xf4, 0x51, 0x46, 0x8b, 0x3a, 0x5e, 0xc2, 0x9a, 0x17, 0xb2, 0x61, 0x90, + 0x29, 0x41, 0x99, 0x0c, 0xc4, 0x7c, 0xef, 0x4c, 0x3b, 0x46, 0x58, 0x40, 0xec, 0x2f, 0x57, 0xe0, + 0x74, 0xd6, 0xc5, 0x21, 0xe8, 0xfd, 0x30, 0xc8, 0xfb, 0x98, 0xcf, 0xdd, 0x54, 0x59, 0x3c, 0x16, + 
0x18, 0x41, 0xd1, 0x2d, 0xf6, 0x1b, 0x0b, 0x9e, 0x82, 0xbb, 0xe7, 0xac, 0x8b, 0x19, 0x72, 0x34, + 0xdc, 0x97, 0x1c, 0xcd, 0x7d, 0xc9, 0xe1, 0xdc, 0x3d, 0x67, 0x1d, 0xed, 0x40, 0xa9, 0xe1, 0xc6, + 0xc4, 0x11, 0x4e, 0x84, 0x1b, 0x47, 0xc2, 0x9c, 0x38, 0xdc, 0x4a, 0x63, 0x3f, 0x31, 0x67, 0x88, + 0xbe, 0x64, 0xc1, 0xf8, 0x7a, 0xb2, 0xc0, 0x88, 0x10, 0x9e, 0xce, 0x11, 0x5c, 0x0e, 0x93, 0x64, + 0xc4, 0xef, 0x7b, 0x4c, 0x35, 0xe2, 0x74, 0x77, 0xd0, 0x47, 0x2c, 0x18, 0xda, 0x70, 0x3d, 0xa3, + 0xfa, 0xfe, 0x11, 0x7c, 0x9c, 0x8b, 0x8c, 0x81, 0xde, 0x71, 0xf0, 0xff, 0x11, 0x96, 0x9c, 0x7b, + 0x69, 0xaa, 0xc1, 0xc3, 0x6a, 0xaa, 0xa1, 0x7b, 0xa4, 0xa9, 0x3e, 0x6e, 0x41, 0x45, 0x8d, 0xb4, + 0x28, 0xd4, 0xf0, 0x9e, 0x23, 0xfc, 0xe4, 0xdc, 0x73, 0xa2, 0xfe, 0x62, 0xcd, 0x1c, 0x7d, 0xd6, + 0x82, 0x61, 0xe7, 0xb5, 0x76, 0x48, 0xea, 0x64, 0x3b, 0x68, 0x45, 0xa2, 0x7c, 0xe2, 0xcb, 0xf9, + 0x77, 0x66, 0x86, 0x32, 0x99, 0x27, 0xdb, 0x2b, 0xad, 0x48, 0x24, 0x2a, 0xea, 0x06, 0x6c, 0x76, + 0xc1, 0xde, 0x2d, 0xc0, 0xe4, 0x3e, 0x14, 0xd0, 0xf3, 0x30, 0x12, 0x84, 0x0d, 0xc7, 0x77, 0x5f, + 0x33, 0x2b, 0x06, 0x29, 0x2b, 0x6b, 0xc5, 0x80, 0xe1, 0x04, 0xa6, 0x59, 0x4a, 0xa2, 0xb0, 0x4f, + 0x29, 0x89, 0x73, 0x30, 0x10, 0x92, 0x56, 0x90, 0xde, 0x2c, 0xb0, 0x24, 0x21, 0x06, 0x41, 0x8f, + 0x42, 0xd1, 0x69, 0xb9, 0x22, 0xb4, 0x44, 0xed, 0x81, 0x66, 0x56, 0x17, 0x31, 0x6d, 0x4f, 0x54, + 0xb6, 0x29, 0x1d, 0x4b, 0x65, 0x1b, 0xaa, 0x06, 0xc4, 0xd9, 0xc5, 0xa0, 0x56, 0x03, 0xc9, 0x33, + 0x05, 0xfb, 0xf5, 0x22, 0x3c, 0xba, 0xe7, 0x7c, 0xd1, 0x71, 0xa6, 0xd6, 0x1e, 0x71, 0xa6, 0x72, + 0x78, 0x0a, 0xfb, 0x0d, 0x4f, 0xb1, 0xc7, 0xf0, 0x7c, 0x84, 0x2e, 0x03, 0x59, 0x69, 0x29, 0x9f, + 0xeb, 0x7e, 0x7b, 0x15, 0x6e, 0x12, 0x2b, 0x40, 0x42, 0xb1, 0xe6, 0x4b, 0xf7, 0x00, 0x89, 0x32, + 0x0a, 0xa5, 0x3c, 0xd4, 0x40, 0xcf, 0x6a, 0x47, 0x7c, 0xee, 0xf7, 0xaa, 0xcd, 0x60, 0xff, 0xf6, + 0x00, 0x3c, 0xde, 0x87, 0xf4, 0x36, 0x67, 0xb1, 0xd5, 0xe7, 0x2c, 0xfe, 0x1e, 0xff, 0x4c, 0x1f, + 0xcb, 0xfc, 0x4c, 0x38, 0xff, 0xcf, 
0xb4, 0xf7, 0x17, 0x42, 0x4f, 0x41, 0xd9, 0xf5, 0x23, 0x52, + 0x6b, 0x87, 0x3c, 0xe6, 0xde, 0xc8, 0x20, 0x5c, 0x14, 0xed, 0x58, 0x61, 0xd0, 0x3d, 0x5d, 0xcd, + 0xa1, 0xcb, 0x7f, 0x28, 0xa7, 0xb4, 0x79, 0x33, 0x19, 0x91, 0x9b, 0x14, 0x73, 0x33, 0x54, 0x02, + 0x70, 0x36, 0xf6, 0xdf, 0xb5, 0xe0, 0x6c, 0x6f, 0x15, 0x8b, 0x9e, 0x81, 0xe1, 0x75, 0x16, 0xb8, + 0xc5, 0x2e, 0x7a, 0x97, 0x53, 0x87, 0xbd, 0xaf, 0x6e, 0xc6, 0x26, 0x0e, 0x9a, 0x83, 0x93, 0x66, + 0xc4, 0xd7, 0xb2, 0x11, 0x55, 0xc2, 0x9c, 0x00, 0x6b, 0x69, 0x20, 0xee, 0xc6, 0xb7, 0xbf, 0x5b, + 0xcc, 0xee, 0x16, 0x37, 0xc5, 0x0e, 0x32, 0x9b, 0xc5, 0x5c, 0x2d, 0xf4, 0x21, 0x71, 0x8b, 0xc7, + 0x2d, 0x71, 0x07, 0x7a, 0x49, 0x5c, 0x34, 0x0f, 0x27, 0x8c, 0x9b, 0xf8, 0x78, 0x21, 0x05, 0x1e, + 0x65, 0xa8, 0xaa, 0x20, 0xad, 0xa6, 0xe0, 0xb8, 0xeb, 0x89, 0xfb, 0x7c, 0xea, 0xfd, 0x6a, 0x01, + 0x1e, 0xea, 0x69, 0xfd, 0x1e, 0x93, 0x46, 0x31, 0x3f, 0xff, 0xc0, 0xf1, 0x7c, 0x7e, 0xf3, 0xa3, + 0x94, 0xf6, 0xfb, 0x28, 0xf6, 0x1f, 0x17, 0x7a, 0x2e, 0x04, 0xba, 0x13, 0xfa, 0xbe, 0x1d, 0xa5, + 0x17, 0x60, 0xd4, 0x69, 0xb5, 0x38, 0x1e, 0x8b, 0xd8, 0x4e, 0x55, 0x5d, 0x9b, 0x31, 0x81, 0x38, + 0x89, 0xdb, 0x97, 0x4d, 0xf3, 0xa7, 0x16, 0x54, 0x30, 0xd9, 0xe0, 0xd2, 0x08, 0xdd, 0x14, 0x43, + 0x64, 0xe5, 0x51, 0xf7, 0x9a, 0x0e, 0x6c, 0xe4, 0xb2, 0x7a, 0xd0, 0x59, 0x83, 0x7d, 0xd8, 0xbc, + 0x65, 0x75, 0x37, 0x5f, 0xb1, 0xf7, 0xdd, 0x7c, 0xf6, 0xb7, 0x87, 0xe8, 0xeb, 0xb5, 0x82, 0xb9, + 0x90, 0xd4, 0x23, 0xfa, 0x7d, 0xdb, 0xa1, 0x27, 0x26, 0x89, 0xfa, 0xbe, 0xd7, 0xf0, 0x12, 0xa6, + 0xed, 0x89, 0x03, 0xb2, 0xc2, 0x81, 0x6a, 0x4e, 0x15, 0xf7, 0xad, 0x39, 0xf5, 0x02, 0x8c, 0x46, + 0xd1, 0xe6, 0x6a, 0xe8, 0x6e, 0x3b, 0x31, 0xb9, 0x42, 0x3a, 0xc2, 0xf6, 0xd5, 0xf5, 0x57, 0xaa, + 0x97, 0x34, 0x10, 0x27, 0x71, 0xd1, 0x02, 0x9c, 0xd4, 0x95, 0x9f, 0x48, 0x18, 0xb3, 0x9c, 0x22, + 0x3e, 0x13, 0x54, 0xb1, 0x05, 0x5d, 0x2b, 0x4a, 0x20, 0xe0, 0xee, 0x67, 0xa8, 0x3c, 0x4d, 0x34, + 0xd2, 0x8e, 0x0c, 0x26, 0xe5, 0x69, 0x82, 0x0e, 0xed, 0x4b, 0xd7, 0x13, 
0x68, 0x19, 0x4e, 0xf1, + 0x89, 0x31, 0xd3, 0x6a, 0x19, 0x6f, 0x34, 0x94, 0xac, 0x37, 0xbc, 0xd0, 0x8d, 0x82, 0xb3, 0x9e, + 0x43, 0xcf, 0xc1, 0xb0, 0x6a, 0x5e, 0x9c, 0x17, 0x67, 0x3b, 0xca, 0xb7, 0xa4, 0xc8, 0x2c, 0xd6, + 0xb1, 0x89, 0x87, 0xde, 0x0d, 0x0f, 0xea, 0xbf, 0x3c, 0xf1, 0x94, 0x1f, 0x78, 0xce, 0x8b, 0xa2, + 0x7a, 0xea, 0x9e, 0xb7, 0x85, 0x4c, 0xb4, 0x3a, 0xee, 0xf5, 0x3c, 0x5a, 0x87, 0xb3, 0x0a, 0x74, + 0xc1, 0x8f, 0x59, 0x16, 0x59, 0x44, 0x66, 0x9d, 0x88, 0x5c, 0x0b, 0x3d, 0x56, 0x86, 0xaf, 0xa2, + 0x2f, 0x0b, 0x5f, 0x70, 0xe3, 0x4b, 0x59, 0x98, 0x78, 0x09, 0xef, 0x41, 0x05, 0x4d, 0x43, 0x85, + 0xf8, 0xce, 0xba, 0x47, 0x56, 0xe6, 0x16, 0x59, 0x71, 0x3e, 0xe3, 0x7c, 0xf5, 0x82, 0x04, 0x60, + 0x8d, 0xa3, 0xe2, 0x7e, 0x47, 0x7a, 0x5e, 0x5c, 0xbf, 0x0a, 0xa7, 0x1b, 0xb5, 0x16, 0xb5, 0x08, + 0xdd, 0x1a, 0x99, 0xa9, 0xb1, 0x30, 0x47, 0xfa, 0x61, 0x78, 0x21, 0x68, 0x95, 0x40, 0xb1, 0x30, + 0xb7, 0xda, 0x85, 0x83, 0x33, 0x9f, 0x64, 0xe1, 0xb0, 0x61, 0xb0, 0xd3, 0x99, 0x38, 0x95, 0x0a, + 0x87, 0xa5, 0x8d, 0x98, 0xc3, 0xd0, 0x65, 0x40, 0x2c, 0x1b, 0xe7, 0x52, 0x1c, 0xb7, 0x94, 0x09, + 0x3a, 0x71, 0x3a, 0x59, 0x62, 0xeb, 0x62, 0x17, 0x06, 0xce, 0x78, 0x8a, 0x5a, 0x34, 0x7e, 0xc0, + 0xa8, 0x4f, 0x3c, 0x98, 0xb4, 0x68, 0xae, 0xf2, 0x66, 0x2c, 0xe1, 0xf6, 0x9f, 0x58, 0x30, 0xaa, + 0x96, 0xf6, 0x31, 0xa4, 0xcb, 0x79, 0xc9, 0x74, 0xb9, 0x85, 0xc3, 0x0b, 0x47, 0xd6, 0xf3, 0x1e, + 0x31, 0xe9, 0x5f, 0x1b, 0x06, 0xd0, 0x02, 0x54, 0xe9, 0x2e, 0xab, 0xa7, 0xee, 0xba, 0x6f, 0x85, + 0x57, 0x56, 0x31, 0xac, 0xd2, 0xbd, 0x2d, 0x86, 0x55, 0x85, 0x33, 0xd2, 0xb2, 0xe0, 0x87, 0x7d, + 0x97, 0x82, 0x48, 0xc9, 0xc2, 0xf2, 0xec, 0xa3, 0x82, 0xd0, 0x99, 0xc5, 0x2c, 0x24, 0x9c, 0xfd, + 0x6c, 0xc2, 0xa0, 0x19, 0xda, 0xd7, 0xca, 0x54, 0xcb, 0x7f, 0x69, 0x43, 0xde, 0x6e, 0x96, 0x5a, + 0xfe, 0x4b, 0x17, 0xab, 0x58, 0xe3, 0x64, 0xeb, 0x80, 0x4a, 0x4e, 0x3a, 0x00, 0x0e, 0xac, 0x03, + 0xa4, 0x34, 0x1a, 0xee, 0x29, 0x8d, 0xe4, 0xa1, 0xc2, 0x48, 0xcf, 0x43, 0x85, 0x77, 0xc0, 0x98, + 0xeb, 0x6f, 
0x92, 0xd0, 0x8d, 0x49, 0x9d, 0xad, 0x05, 0x26, 0xa9, 0xca, 0xda, 0x02, 0x58, 0x4c, + 0x40, 0x71, 0x0a, 0x3b, 0x29, 0x42, 0xc7, 0xfa, 0x10, 0xa1, 0x3d, 0x14, 0xd7, 0x78, 0x3e, 0x8a, + 0xeb, 0xc4, 0xe1, 0x15, 0xd7, 0xc9, 0x23, 0x55, 0x5c, 0x28, 0x17, 0xc5, 0xd5, 0x97, 0x4e, 0x30, + 0x76, 0xa6, 0xa7, 0xf7, 0xd9, 0x99, 0xf6, 0xd2, 0x5a, 0x67, 0xee, 0x5a, 0x6b, 0x65, 0x2b, 0xa4, + 0x07, 0x8e, 0x5a, 0x21, 0x7d, 0xbc, 0x00, 0x67, 0xb4, 0xc8, 0xa6, 0x0b, 0xc5, 0xdd, 0xa0, 0x42, + 0x8b, 0xdd, 0xa5, 0xc9, 0xcf, 0xe8, 0x8c, 0xa4, 0x4b, 0x9d, 0xbf, 0xa9, 0x20, 0xd8, 0xc0, 0x62, + 0xb9, 0x8b, 0x24, 0x64, 0x85, 0xe7, 0xd3, 0xf2, 0x7c, 0x4e, 0xb4, 0x63, 0x85, 0x41, 0xa7, 0x22, + 0xfd, 0x2d, 0x72, 0xd0, 0xd3, 0x25, 0x4d, 0xe7, 0x34, 0x08, 0x9b, 0x78, 0xe8, 0x49, 0xce, 0x84, + 0xc9, 0x12, 0x2a, 0xd3, 0x47, 0xf8, 0x46, 0x44, 0x89, 0x0f, 0x05, 0x95, 0xdd, 0x61, 0x49, 0xaa, + 0xa5, 0xee, 0xee, 0xb0, 0x70, 0x37, 0x85, 0x61, 0xff, 0x4f, 0x0b, 0x1e, 0xca, 0x1c, 0x8a, 0x63, + 0xd0, 0xd3, 0x3b, 0x49, 0x3d, 0x5d, 0xcd, 0x6b, 0x13, 0x63, 0xbc, 0x45, 0x0f, 0x9d, 0xfd, 0x9f, + 0x2c, 0x18, 0xd3, 0xf8, 0xc7, 0xf0, 0xaa, 0x6e, 0xf2, 0x55, 0xf3, 0xdb, 0xaf, 0x55, 0xba, 0xde, + 0xed, 0xf7, 0x0a, 0xa0, 0xca, 0x0c, 0xcf, 0xd4, 0x64, 0x11, 0xf7, 0x7d, 0x4e, 0x8d, 0x3b, 0x30, + 0xc8, 0x0e, 0xbd, 0xa3, 0x7c, 0x02, 0x7a, 0x92, 0xfc, 0xd9, 0x01, 0xba, 0x0e, 0x28, 0x60, 0x7f, + 0x23, 0x2c, 0x18, 0xb2, 0x6b, 0x11, 0x78, 0x05, 0xd7, 0xba, 0x48, 0xc1, 0xd3, 0xd7, 0x22, 0x88, + 0x76, 0xac, 0x30, 0xa8, 0x26, 0x71, 0x6b, 0x81, 0x3f, 0xe7, 0x39, 0x91, 0xbc, 0x5e, 0x5b, 0x69, + 0x92, 0x45, 0x09, 0xc0, 0x1a, 0x87, 0x9d, 0x87, 0xbb, 0x51, 0xcb, 0x73, 0x3a, 0xc6, 0xae, 0xdc, + 0xa8, 0xb5, 0xa2, 0x40, 0xd8, 0xc4, 0xb3, 0x9b, 0x30, 0x91, 0x7c, 0x89, 0x79, 0xb2, 0xc1, 0x82, + 0x51, 0xfb, 0x1a, 0xce, 0x69, 0xa8, 0x38, 0xec, 0xa9, 0xa5, 0xb6, 0x23, 0x64, 0x82, 0x0e, 0xc9, + 0x94, 0x00, 0xac, 0x71, 0xec, 0x7f, 0x6a, 0xc1, 0xa9, 0x8c, 0x41, 0xcb, 0x31, 0xc5, 0x31, 0xd6, + 0xd2, 0x26, 0xcb, 0x06, 0xf8, 0x21, 0x18, 0xaa, 
0x93, 0x0d, 0x47, 0x86, 0x3b, 0x1a, 0xd2, 0x73, + 0x9e, 0x37, 0x63, 0x09, 0xb7, 0x7f, 0xab, 0x00, 0xe3, 0xc9, 0xbe, 0x46, 0x2c, 0x6d, 0x88, 0x0f, + 0x93, 0x1b, 0xd5, 0x82, 0x6d, 0x12, 0x76, 0xe8, 0x9b, 0x5b, 0xa9, 0xb4, 0xa1, 0x2e, 0x0c, 0x9c, + 0xf1, 0x14, 0x2b, 0x32, 0x5e, 0x57, 0xa3, 0x2d, 0x67, 0xe4, 0xf5, 0x3c, 0x67, 0xa4, 0xfe, 0x98, + 0x66, 0x68, 0x84, 0x62, 0x89, 0x4d, 0xfe, 0xd4, 0x16, 0x61, 0x71, 0xd8, 0xb3, 0x6d, 0xd7, 0x8b, + 0x5d, 0x5f, 0xbc, 0xb2, 0x98, 0xab, 0xca, 0x16, 0x59, 0xee, 0x46, 0xc1, 0x59, 0xcf, 0xd9, 0xdf, + 0x19, 0x00, 0x95, 0xbe, 0xcf, 0x42, 0xd7, 0x72, 0x0a, 0xfc, 0x3b, 0x68, 0xf2, 0x99, 0x9a, 0x5b, + 0x03, 0x7b, 0xc5, 0x92, 0x70, 0x57, 0x8e, 0xe9, 0xcf, 0x55, 0x03, 0xb6, 0xa6, 0x41, 0xd8, 0xc4, + 0xa3, 0x3d, 0xf1, 0xdc, 0x6d, 0xc2, 0x1f, 0x1a, 0x4c, 0xf6, 0x64, 0x49, 0x02, 0xb0, 0xc6, 0xa1, + 0x3d, 0xa9, 0xbb, 0x1b, 0x1b, 0xc2, 0x2f, 0xa1, 0x7a, 0x42, 0x47, 0x07, 0x33, 0x08, 0xbf, 0x86, + 0x22, 0xd8, 0x12, 0xf6, 0xb7, 0x71, 0x0d, 0x45, 0xb0, 0x85, 0x19, 0x84, 0x7e, 0x25, 0x3f, 0x08, + 0x9b, 0x8e, 0xe7, 0xbe, 0x46, 0xea, 0x8a, 0x8b, 0xb0, 0xbb, 0xd5, 0x57, 0xba, 0xda, 0x8d, 0x82, + 0xb3, 0x9e, 0xa3, 0x13, 0xba, 0x15, 0x92, 0xba, 0x5b, 0x8b, 0x4d, 0x6a, 0x90, 0x9c, 0xd0, 0xab, + 0x5d, 0x18, 0x38, 0xe3, 0x29, 0x34, 0x03, 0xe3, 0xb2, 0xfc, 0x82, 0x2c, 0xe8, 0x35, 0x9c, 0x2c, + 0x20, 0x84, 0x93, 0x60, 0x9c, 0xc6, 0xa7, 0x42, 0xb2, 0x29, 0xca, 0x11, 0x32, 0x33, 0xdd, 0x10, + 0x92, 0xb2, 0x4c, 0x21, 0x56, 0x18, 0xf6, 0x87, 0x8b, 0x54, 0xa9, 0xf7, 0xa8, 0xfa, 0x79, 0x6c, + 0x81, 0xa6, 0xc9, 0x19, 0x39, 0xd0, 0xc7, 0x8c, 0x7c, 0x16, 0x46, 0x6e, 0x46, 0x81, 0xaf, 0x82, + 0x38, 0x4b, 0x3d, 0x83, 0x38, 0x0d, 0xac, 0xec, 0x20, 0xce, 0xc1, 0xbc, 0x82, 0x38, 0x87, 0xee, + 0x32, 0x88, 0xf3, 0x0f, 0x4a, 0xa0, 0xee, 0x19, 0xbb, 0x4a, 0xe2, 0x5b, 0x41, 0xb8, 0xe5, 0xfa, + 0x0d, 0x56, 0x4a, 0xe0, 0x4b, 0x96, 0xac, 0x46, 0xb0, 0x64, 0x26, 0xe1, 0x6d, 0xe4, 0x74, 0x57, + 0x54, 0x82, 0xd9, 0xd4, 0x9a, 0xc1, 0x28, 0x75, 0x1d, 0xba, 0x09, 0xc2, 0x89, 0x1e, 
0xa1, 0x0f, + 0x00, 0x48, 0x27, 0xee, 0x86, 0x94, 0xc0, 0x8b, 0xf9, 0xf4, 0x0f, 0x93, 0x0d, 0x6d, 0x52, 0xaf, + 0x29, 0x26, 0xd8, 0x60, 0x88, 0x3e, 0xae, 0x13, 0x14, 0x79, 0xb6, 0xc7, 0xfb, 0x8e, 0x64, 0x6c, + 0xfa, 0x49, 0x4f, 0xc4, 0x30, 0xe4, 0xfa, 0x0d, 0x3a, 0x4f, 0x44, 0xb0, 0xdb, 0x5b, 0xb2, 0x4a, + 0xbe, 0x2c, 0x05, 0x4e, 0x7d, 0xd6, 0xf1, 0x1c, 0xbf, 0x46, 0xc2, 0x45, 0x8e, 0xae, 0x35, 0xa8, + 0x68, 0xc0, 0x92, 0x50, 0xd7, 0x65, 0x68, 0xa5, 0x7e, 0x2e, 0x43, 0x3b, 0xfb, 0x4e, 0x38, 0xd9, + 0xf5, 0x31, 0x0f, 0x94, 0x8d, 0x78, 0xf7, 0x89, 0x8c, 0xf6, 0x6f, 0x0f, 0x6a, 0xa5, 0x75, 0x35, + 0xa8, 0xf3, 0xbb, 0xb5, 0x42, 0xfd, 0x45, 0x85, 0xc9, 0x9c, 0xe3, 0x14, 0x51, 0x6a, 0xc6, 0x68, + 0xc4, 0x26, 0x4b, 0x3a, 0x47, 0x5b, 0x4e, 0x48, 0xfc, 0xa3, 0x9e, 0xa3, 0xab, 0x8a, 0x09, 0x36, + 0x18, 0xa2, 0xcd, 0x44, 0x3a, 0xd2, 0xc5, 0xc3, 0xa7, 0x23, 0xb1, 0x02, 0x7c, 0x59, 0x57, 0xd0, + 0x7c, 0xd6, 0x82, 0x31, 0x3f, 0x31, 0x73, 0xf3, 0x89, 0x40, 0xce, 0x5e, 0x15, 0xfc, 0x9a, 0xca, + 0x64, 0x1b, 0x4e, 0xf1, 0xcf, 0x52, 0x69, 0xa5, 0x03, 0xaa, 0x34, 0x7d, 0xb7, 0xdf, 0x60, 0xaf, + 0xbb, 0xfd, 0x90, 0xaf, 0x6e, 0x5c, 0x1d, 0xca, 0xfd, 0xc6, 0x55, 0xc8, 0xb8, 0x6d, 0xf5, 0x06, + 0x54, 0x6a, 0x21, 0x71, 0xe2, 0xbb, 0xbc, 0x7c, 0x93, 0xc5, 0x76, 0xcc, 0x49, 0x02, 0x58, 0xd3, + 0xb2, 0xff, 0xcf, 0x00, 0x9c, 0x90, 0x23, 0x22, 0xb3, 0x17, 0xa8, 0x7e, 0xe4, 0x7c, 0xb5, 0xad, + 0xac, 0xf4, 0xe3, 0x25, 0x09, 0xc0, 0x1a, 0x87, 0xda, 0x63, 0xed, 0x88, 0xac, 0xb4, 0x88, 0xbf, + 0xe4, 0xae, 0x47, 0xe2, 0x30, 0x56, 0x2d, 0x94, 0x6b, 0x1a, 0x84, 0x4d, 0x3c, 0x6a, 0xdb, 0x3b, + 0x86, 0xd1, 0x6a, 0xd8, 0xf6, 0xd2, 0x50, 0x95, 0x70, 0xf4, 0x4b, 0x99, 0x65, 0xc8, 0xf3, 0xc9, + 0xf9, 0xeb, 0x4a, 0xda, 0x38, 0xe0, 0xd5, 0xd1, 0xff, 0xc8, 0x82, 0x33, 0xbc, 0x55, 0x8e, 0xe4, + 0xb5, 0x56, 0xdd, 0x89, 0x49, 0x94, 0xcf, 0xf5, 0x25, 0x19, 0xfd, 0xd3, 0xee, 0xe5, 0x2c, 0xb6, + 0x38, 0xbb, 0x37, 0xe8, 0x33, 0x16, 0x8c, 0x6f, 0x25, 0xca, 0xc5, 0x48, 0xd5, 0x71, 0xd8, 0x4a, + 0x0e, 0x09, 0xa2, 0x7a, 
0xa9, 0x25, 0xdb, 0x23, 0x9c, 0xe6, 0x6e, 0xff, 0x0f, 0x0b, 0x4c, 0x31, + 0x7a, 0xfc, 0x55, 0x66, 0x0e, 0x6e, 0x0a, 0x4a, 0xeb, 0xb2, 0xd4, 0xd3, 0xba, 0x7c, 0x14, 0x8a, + 0x6d, 0xb7, 0x2e, 0xf6, 0x17, 0xfa, 0x88, 0x78, 0x71, 0x1e, 0xd3, 0x76, 0xfb, 0x5f, 0x95, 0xb4, + 0x1b, 0x44, 0xa4, 0xd4, 0x7d, 0x5f, 0xbc, 0xf6, 0x86, 0xaa, 0xc3, 0xc8, 0xdf, 0xfc, 0x6a, 0x57, + 0x1d, 0xc6, 0x1f, 0x3f, 0x78, 0xc6, 0x24, 0x1f, 0xa0, 0x5e, 0x65, 0x18, 0x87, 0xf6, 0x49, 0x97, + 0xbc, 0x09, 0x65, 0xba, 0x05, 0x63, 0xfe, 0xcc, 0x72, 0xa2, 0x53, 0xe5, 0x4b, 0xa2, 0xfd, 0xce, + 0xee, 0xe4, 0x8f, 0x1d, 0xbc, 0x5b, 0xf2, 0x69, 0xac, 0xe8, 0xa3, 0x08, 0x2a, 0xf4, 0x37, 0xcb, + 0xec, 0x14, 0x9b, 0xbb, 0x6b, 0x4a, 0x66, 0x4a, 0x40, 0x2e, 0x69, 0xa3, 0x9a, 0x0f, 0xf2, 0xa1, + 0xc2, 0x6e, 0xd9, 0x67, 0x4c, 0xf9, 0x1e, 0x70, 0x55, 0xe5, 0x57, 0x4a, 0xc0, 0x9d, 0xdd, 0xc9, + 0x17, 0x0e, 0xce, 0x54, 0x3d, 0x8e, 0x35, 0x0b, 0xfb, 0x6f, 0x06, 0xf4, 0xdc, 0x15, 0xe5, 0x37, + 0xbf, 0x2f, 0xe6, 0xee, 0xf3, 0xa9, 0xb9, 0x7b, 0xae, 0x6b, 0xee, 0x8e, 0xe9, 0x2b, 0xd8, 0x13, + 0xb3, 0xf1, 0xb8, 0x0d, 0x81, 0xfd, 0xfd, 0x0d, 0xcc, 0x02, 0x7a, 0xb5, 0xed, 0x86, 0x24, 0x5a, + 0x0d, 0xdb, 0xbe, 0xeb, 0x37, 0xd8, 0x74, 0x2c, 0x9b, 0x16, 0x50, 0x02, 0x8c, 0xd3, 0xf8, 0x74, + 0x53, 0x4f, 0xbf, 0xf9, 0x0d, 0x67, 0x9b, 0xcf, 0x2a, 0xa3, 0x62, 0x5b, 0x55, 0xb4, 0x63, 0x85, + 0x81, 0x36, 0xe1, 0x11, 0x49, 0x60, 0x9e, 0x78, 0x44, 0xdc, 0xa1, 0xbe, 0xe1, 0x86, 0x4d, 0x1e, + 0x20, 0xce, 0x23, 0x13, 0xde, 0x2c, 0x28, 0x3c, 0x82, 0xf7, 0xc0, 0xc5, 0x7b, 0x52, 0xb2, 0xbf, + 0xca, 0xce, 0xeb, 0x8d, 0xe4, 0x75, 0x3a, 0xfb, 0x3c, 0xb7, 0xe9, 0xca, 0xc2, 0x72, 0x6a, 0xf6, + 0x2d, 0xd1, 0x46, 0xcc, 0x61, 0xe8, 0x16, 0x0c, 0xad, 0xf3, 0xab, 0x6e, 0xf3, 0xb9, 0x56, 0x43, + 0xdc, 0x9b, 0xcb, 0xaa, 0xb3, 0xca, 0x4b, 0x74, 0xef, 0xe8, 0x9f, 0x58, 0x72, 0xb3, 0xbf, 0x51, + 0x82, 0xf1, 0xd4, 0xbd, 0xf6, 0x89, 0x92, 0xd5, 0x85, 0x7d, 0x4b, 0x56, 0xbf, 0x17, 0xa0, 0x4e, + 0x5a, 0x5e, 0xd0, 0x61, 0x86, 0xdf, 0xc0, 0x81, 0x0d, 0x3f, 
0xb5, 0x57, 0x98, 0x57, 0x54, 0xb0, + 0x41, 0x51, 0x54, 0xd3, 0xe3, 0x15, 0xb0, 0x53, 0xd5, 0xf4, 0x8c, 0xcb, 0x77, 0x06, 0x8f, 0xf7, + 0xf2, 0x1d, 0x17, 0xc6, 0x79, 0x17, 0x55, 0x8a, 0xf8, 0x5d, 0x64, 0x82, 0xb3, 0x24, 0x9b, 0xf9, + 0x24, 0x19, 0x9c, 0xa6, 0x6b, 0xde, 0xac, 0x53, 0x3e, 0xee, 0x9b, 0x75, 0xde, 0x0a, 0x15, 0xf9, + 0x9d, 0xa3, 0x89, 0x8a, 0x2e, 0xb3, 0x21, 0xa7, 0x41, 0x84, 0x35, 0xbc, 0xab, 0xda, 0x05, 0xdc, + 0xab, 0x6a, 0x17, 0xf6, 0xa7, 0x0b, 0x74, 0xc7, 0xc0, 0xfb, 0xa5, 0x0a, 0x37, 0x3d, 0x01, 0x83, + 0x4e, 0x3b, 0xde, 0x0c, 0xba, 0x2e, 0xcb, 0x9d, 0x61, 0xad, 0x58, 0x40, 0xd1, 0x12, 0x0c, 0xd4, + 0x75, 0x31, 0x9e, 0x83, 0x7c, 0x4f, 0xed, 0x7c, 0x75, 0x62, 0x82, 0x19, 0x15, 0xf4, 0x08, 0x0c, + 0xc4, 0x4e, 0x43, 0xe6, 0x05, 0xb2, 0x5c, 0xf0, 0x35, 0xa7, 0x11, 0x61, 0xd6, 0x7a, 0x90, 0x02, + 0xa4, 0x2f, 0xc0, 0x68, 0xe4, 0x36, 0x7c, 0x27, 0x6e, 0x87, 0xc4, 0x38, 0x9f, 0xd4, 0xd1, 0x29, + 0x26, 0x10, 0x27, 0x71, 0xed, 0xdf, 0x19, 0x81, 0xd3, 0xd5, 0xb9, 0x65, 0x79, 0x85, 0xc2, 0x91, + 0xa5, 0xf6, 0x65, 0xf1, 0x38, 0xbe, 0xd4, 0xbe, 0x1e, 0xdc, 0x3d, 0x23, 0xb5, 0xcf, 0x33, 0x52, + 0xfb, 0x92, 0x79, 0x56, 0xc5, 0x3c, 0xf2, 0xac, 0xb2, 0x7a, 0xd0, 0x4f, 0x9e, 0xd5, 0x91, 0xe5, + 0xfa, 0xed, 0xd9, 0xa1, 0x03, 0xe5, 0xfa, 0xa9, 0x44, 0xc8, 0x5c, 0x32, 0x60, 0x7a, 0x7c, 0xaa, + 0xcc, 0x44, 0x48, 0x95, 0x84, 0xc6, 0xb3, 0xbb, 0x84, 0xa8, 0x7f, 0x39, 0xff, 0x0e, 0xf4, 0x91, + 0x84, 0x26, 0x12, 0xcc, 0xcc, 0xc4, 0xc7, 0xa1, 0x3c, 0x12, 0x1f, 0xb3, 0xba, 0xb3, 0x6f, 0xe2, + 0xe3, 0x0b, 0x30, 0x5a, 0xf3, 0x02, 0x9f, 0xac, 0x86, 0x41, 0x1c, 0xd4, 0x02, 0x79, 0x5d, 0xa7, + 0xbe, 0x6d, 0xca, 0x04, 0xe2, 0x24, 0x6e, 0xaf, 0xac, 0xc9, 0xca, 0x61, 0xb3, 0x26, 0xe1, 0x1e, + 0x65, 0x4d, 0xfe, 0xac, 0xce, 0xef, 0x1f, 0x66, 0x5f, 0xe4, 0xbd, 0xf9, 0x7f, 0x91, 0xbe, 0xee, + 0xe3, 0x7c, 0x9d, 0xdf, 0x56, 0x4b, 0x4d, 0xf0, 0xb9, 0xa0, 0x49, 0x0d, 0xbf, 0x11, 0x36, 0x24, + 0xaf, 0x1c, 0xc1, 0x84, 0xbd, 0x51, 0xd5, 0x6c, 0xd4, 0x0d, 0xb6, 0xba, 0x09, 0x27, 0x3b, 0x72, + 
0x98, 0xfa, 0x03, 0x5f, 0x28, 0xc0, 0x0f, 0xec, 0xdb, 0x05, 0x74, 0x0b, 0x20, 0x76, 0x1a, 0x62, + 0xa2, 0x8a, 0xa3, 0x99, 0x43, 0x86, 0x90, 0xae, 0x49, 0x7a, 0xbc, 0x70, 0x8e, 0xfa, 0xcb, 0x0e, + 0x3d, 0xe4, 0x6f, 0x16, 0x39, 0x1a, 0x78, 0x5d, 0xf5, 0x45, 0x71, 0xe0, 0x11, 0xcc, 0x20, 0x54, + 0xfd, 0x87, 0xa4, 0x41, 0x4d, 0xda, 0x62, 0x52, 0xfd, 0x63, 0xd6, 0x8a, 0x05, 0x14, 0x3d, 0x07, + 0xc3, 0x8e, 0xe7, 0xf1, 0xf4, 0x24, 0x12, 0x89, 0x6b, 0xe0, 0x74, 0xa1, 0x43, 0x0d, 0xc2, 0x26, + 0x9e, 0xfd, 0x57, 0x05, 0x98, 0xdc, 0x47, 0xa6, 0x74, 0xa5, 0xa5, 0x96, 0xfa, 0x4e, 0x4b, 0x15, + 0x29, 0x1b, 0x83, 0x3d, 0x52, 0x36, 0x9e, 0x83, 0xe1, 0x98, 0x38, 0x4d, 0x11, 0x74, 0x26, 0x7c, + 0x0e, 0xfa, 0xac, 0x59, 0x83, 0xb0, 0x89, 0x47, 0xa5, 0xd8, 0x98, 0x53, 0xab, 0x91, 0x28, 0x92, + 0x39, 0x19, 0xc2, 0x6f, 0x9b, 0x5b, 0xc2, 0x07, 0x73, 0x87, 0xcf, 0x24, 0x58, 0xe0, 0x14, 0xcb, + 0xf4, 0x80, 0x57, 0xfa, 0x1c, 0xf0, 0x2f, 0x17, 0xe0, 0xd1, 0x3d, 0xb5, 0x5b, 0xdf, 0xe9, 0x32, + 0xed, 0x88, 0x84, 0xe9, 0x89, 0x73, 0x2d, 0x22, 0x21, 0x66, 0x10, 0x3e, 0x4a, 0xad, 0x96, 0x0a, + 0x18, 0xce, 0x3f, 0x77, 0x8c, 0x8f, 0x52, 0x82, 0x05, 0x4e, 0xb1, 0xbc, 0xdb, 0x69, 0xf9, 0x8d, + 0x01, 0x78, 0xbc, 0x0f, 0x1b, 0x20, 0xc7, 0x1c, 0xbb, 0x64, 0x3e, 0x68, 0xf1, 0x1e, 0xe5, 0x83, + 0xde, 0xdd, 0x70, 0xbd, 0x91, 0x46, 0xda, 0x57, 0x2e, 0xdf, 0x57, 0x0b, 0x70, 0xb6, 0xb7, 0xc1, + 0x82, 0xde, 0x0e, 0xe3, 0xa1, 0x0a, 0xb2, 0x33, 0x53, 0x49, 0x4f, 0x71, 0xcf, 0x4e, 0x02, 0x84, + 0xd3, 0xb8, 0x68, 0x0a, 0xa0, 0xe5, 0xc4, 0x9b, 0xd1, 0x85, 0x1d, 0x37, 0x8a, 0x45, 0x41, 0xa9, + 0x31, 0x7e, 0x96, 0x28, 0x5b, 0xb1, 0x81, 0x41, 0xd9, 0xb1, 0x7f, 0xf3, 0xc1, 0xd5, 0x20, 0xe6, + 0x0f, 0xf1, 0xcd, 0xd6, 0x29, 0x79, 0xbd, 0x94, 0x01, 0xc2, 0x69, 0x5c, 0xca, 0x8e, 0x9d, 0x56, + 0xf3, 0x8e, 0xf2, 0x5d, 0x18, 0x63, 0xb7, 0xa4, 0x5a, 0xb1, 0x81, 0x91, 0x4e, 0x92, 0x2d, 0xed, + 0x9f, 0x24, 0x6b, 0xff, 0xcb, 0x02, 0x3c, 0xd4, 0xd3, 0xe0, 0xed, 0x4f, 0x4c, 0xdd, 0x7f, 0x89, + 0xad, 0x77, 0xb9, 0xc2, 0x0e, 0x96, 
0x10, 0xf9, 0xa7, 0x3d, 0x66, 0x9a, 0x48, 0x88, 0xbc, 0xfb, + 0x3a, 0x0f, 0xf7, 0xdf, 0x78, 0x76, 0xe5, 0x40, 0x0e, 0x1c, 0x20, 0x07, 0x32, 0xf5, 0x31, 0x4a, + 0x7d, 0x6a, 0x87, 0x3f, 0x1f, 0xe8, 0x39, 0xbc, 0x74, 0x83, 0xdc, 0x97, 0xdf, 0x7c, 0x1e, 0x4e, + 0xb8, 0x3e, 0xbb, 0x6a, 0xb0, 0xda, 0x5e, 0x17, 0x35, 0x86, 0x78, 0x21, 0x4d, 0x95, 0x68, 0xb1, + 0x98, 0x82, 0xe3, 0xae, 0x27, 0xee, 0xc3, 0x9c, 0xd4, 0xbb, 0x1b, 0xd2, 0x03, 0x4a, 0xee, 0x15, + 0x38, 0x23, 0x87, 0x62, 0xd3, 0x09, 0x49, 0x5d, 0x28, 0xdb, 0x48, 0xa4, 0xd6, 0x3c, 0xc4, 0xd3, + 0x73, 0x32, 0x10, 0x70, 0xf6, 0x73, 0xec, 0x76, 0xb7, 0xa0, 0xe5, 0xd6, 0xc4, 0x56, 0x50, 0xdf, + 0xee, 0x46, 0x1b, 0x31, 0x87, 0x69, 0x7d, 0x51, 0x39, 0x1e, 0x7d, 0xf1, 0x5e, 0xa8, 0xa8, 0xf1, + 0xe6, 0x59, 0x02, 0x6a, 0x92, 0x77, 0x65, 0x09, 0xa8, 0x19, 0x6e, 0x60, 0xed, 0x77, 0x33, 0xf2, + 0x8f, 0xc0, 0x88, 0xf2, 0x7e, 0xf5, 0x7b, 0xc7, 0x9e, 0xfd, 0x7f, 0x0b, 0x90, 0xba, 0x05, 0x07, + 0xed, 0x40, 0xa5, 0x2e, 0xef, 0x26, 0xce, 0xa7, 0x90, 0xab, 0xba, 0xea, 0x58, 0x1f, 0xff, 0xa8, + 0x26, 0xac, 0x99, 0xa1, 0xf7, 0xf3, 0x9a, 0xa9, 0x82, 0x75, 0x21, 0x8f, 0xbc, 0xe4, 0xaa, 0xa2, + 0x67, 0x5e, 0xa2, 0x25, 0xdb, 0xb0, 0xc1, 0x0f, 0xc5, 0x50, 0xd9, 0x94, 0xb7, 0xfd, 0xe4, 0x23, + 0xee, 0xd4, 0xe5, 0x41, 0xdc, 0x44, 0x53, 0x7f, 0xb1, 0x66, 0x64, 0xff, 0x49, 0x01, 0x4e, 0x27, + 0x3f, 0x80, 0x38, 0xae, 0xfb, 0x35, 0x0b, 0x1e, 0xf4, 0x9c, 0x28, 0xae, 0xb6, 0xd9, 0x46, 0x61, + 0xa3, 0xed, 0xad, 0xa4, 0xca, 0xeb, 0x1e, 0xd6, 0xd9, 0xa2, 0x08, 0xa7, 0x6f, 0x87, 0x9a, 0x7d, + 0xf8, 0xf6, 0xee, 0xe4, 0x83, 0x4b, 0xd9, 0xcc, 0x71, 0xaf, 0x5e, 0xa1, 0xcf, 0x5a, 0x70, 0xa2, + 0xd6, 0x0e, 0x43, 0xe2, 0xc7, 0xba, 0xab, 0xfc, 0x2b, 0x5e, 0xcd, 0x65, 0x20, 0x75, 0x07, 0x4f, + 0x53, 0x81, 0x3a, 0x97, 0xe2, 0x85, 0xbb, 0xb8, 0xdb, 0xbf, 0x40, 0x35, 0x67, 0xcf, 0xf7, 0xfc, + 0x5b, 0x76, 0x9d, 0xd5, 0x5f, 0x0c, 0xc2, 0x68, 0xa2, 0x86, 0x70, 0xe2, 0x88, 0xcb, 0xda, 0xf7, + 0x88, 0x8b, 0x25, 0x83, 0xb5, 0x7d, 0x79, 0xd9, 0xae, 0x91, 0x0c, 0xd6, 
0xf6, 0x09, 0xe6, 0x30, + 0x31, 0xa4, 0xb8, 0xed, 0x8b, 0xe8, 0x76, 0x73, 0x48, 0x71, 0xdb, 0xc7, 0x02, 0x8a, 0x3e, 0x64, + 0xc1, 0x08, 0x5b, 0x7c, 0xe2, 0x80, 0x50, 0x28, 0xb4, 0xcb, 0x39, 0x2c, 0x77, 0x59, 0x2f, 0x9b, + 0x45, 0x43, 0x9a, 0x2d, 0x38, 0xc1, 0x11, 0x7d, 0xd4, 0x82, 0x8a, 0xba, 0x9f, 0x4f, 0xdc, 0x9c, + 0x5d, 0xcd, 0xb7, 0x44, 0x73, 0x4a, 0xea, 0xa9, 0x5a, 0xb9, 0x58, 0x33, 0x46, 0x91, 0x3a, 0xbd, + 0x1b, 0x3a, 0x9a, 0xd3, 0x3b, 0xc8, 0x38, 0xb9, 0x7b, 0x2b, 0x54, 0x9a, 0x8e, 0xef, 0x6e, 0x90, + 0x28, 0xe6, 0x07, 0x6a, 0xb2, 0x72, 0xbc, 0x6c, 0xc4, 0x1a, 0x4e, 0x8d, 0xfd, 0x88, 0xbd, 0x58, + 0x6c, 0x9c, 0x80, 0x31, 0x63, 0xbf, 0xaa, 0x9b, 0xb1, 0x89, 0x63, 0x1e, 0xd7, 0xc1, 0x3d, 0x3d, + 0xae, 0x1b, 0xde, 0xe7, 0xb8, 0xae, 0x0a, 0x67, 0x9c, 0x76, 0x1c, 0x5c, 0x22, 0x8e, 0x37, 0xc3, + 0x6f, 0xe8, 0x8f, 0x78, 0xd9, 0xe9, 0x11, 0xe6, 0x02, 0x56, 0xf1, 0x5b, 0x55, 0xe2, 0x6d, 0x74, + 0x21, 0xe1, 0xec, 0x67, 0xed, 0x7f, 0x6e, 0xc1, 0x99, 0xcc, 0xa9, 0x70, 0xff, 0x46, 0xce, 0xdb, + 0x9f, 0x2f, 0xc1, 0xa9, 0x8c, 0x0a, 0xe3, 0xa8, 0x63, 0x2e, 0x12, 0x2b, 0x8f, 0x20, 0xb4, 0x64, + 0x4c, 0x95, 0xfc, 0x36, 0x19, 0x2b, 0xe3, 0x60, 0x27, 0xf0, 0xfa, 0x14, 0xbc, 0x78, 0xbc, 0xa7, + 0xe0, 0xc6, 0x5c, 0x1f, 0xb8, 0xa7, 0x73, 0xbd, 0xb4, 0xcf, 0x5c, 0xff, 0x9a, 0x05, 0x13, 0xcd, + 0x1e, 0xd7, 0xda, 0x88, 0xf3, 0xa4, 0xeb, 0x47, 0x73, 0x69, 0xce, 0xec, 0x23, 0xb7, 0x77, 0x27, + 0x7b, 0xde, 0x26, 0x84, 0x7b, 0xf6, 0xca, 0xfe, 0x4e, 0x11, 0x98, 0xbd, 0xc6, 0xaa, 0xc8, 0x76, + 0xd0, 0x07, 0xcd, 0x8b, 0x0a, 0xac, 0xbc, 0x8a, 0xea, 0x73, 0xe2, 0xea, 0xa2, 0x03, 0x3e, 0x82, + 0x59, 0xf7, 0x1e, 0xa4, 0x25, 0x61, 0xa1, 0x0f, 0x49, 0xe8, 0xc9, 0x1b, 0x21, 0x8a, 0xf9, 0xdf, + 0x08, 0x51, 0x49, 0xdf, 0x06, 0xb1, 0xf7, 0x27, 0x1e, 0xb8, 0x2f, 0x3f, 0xf1, 0x2f, 0x5b, 0x5c, + 0xf0, 0xa4, 0xbe, 0x82, 0x36, 0x37, 0xac, 0x3d, 0xcc, 0x8d, 0xa7, 0xa0, 0x1c, 0x09, 0xc9, 0x2c, + 0xcc, 0x12, 0x1d, 0x00, 0x25, 0xda, 0xb1, 0xc2, 0xa0, 0xbb, 0x2e, 0xc7, 0xf3, 0x82, 0x5b, 0x17, + 0x9a, 0xad, 
0xb8, 0x23, 0x0c, 0x14, 0xb5, 0x2d, 0x98, 0x51, 0x10, 0x6c, 0x60, 0xd9, 0x9b, 0x60, + 0x6c, 0x18, 0xee, 0xfe, 0x16, 0xd0, 0x3e, 0xae, 0x6f, 0xfe, 0x87, 0x05, 0xc1, 0x8a, 0x6f, 0x00, + 0x9e, 0x4f, 0x5d, 0x97, 0xdd, 0x7f, 0xa8, 0xdb, 0xfb, 0x01, 0x6a, 0x41, 0xb3, 0x45, 0xb7, 0xc4, + 0x6b, 0x41, 0x3e, 0xfb, 0xa8, 0x39, 0x45, 0x4f, 0x0f, 0x98, 0x6e, 0xc3, 0x06, 0xbf, 0x84, 0xd4, + 0x2e, 0xee, 0x2b, 0xb5, 0x13, 0x02, 0x6c, 0x60, 0x6f, 0x01, 0x66, 0xff, 0x95, 0x05, 0x09, 0x83, + 0x0e, 0xb5, 0xa0, 0x44, 0xbb, 0xdb, 0x11, 0xb2, 0x60, 0x25, 0x3f, 0xeb, 0x91, 0x0a, 0x61, 0xb1, + 0xc0, 0xd8, 0x4f, 0xcc, 0x19, 0x21, 0x4f, 0x84, 0xf5, 0xe5, 0xb2, 0xaf, 0x31, 0x19, 0x5e, 0x0a, + 0x82, 0x2d, 0x1e, 0x19, 0xa3, 0x43, 0x04, 0xed, 0xe7, 0xe1, 0x64, 0x57, 0xa7, 0xd8, 0xcd, 0xa1, + 0x81, 0xdc, 0x9c, 0x1b, 0x0b, 0x83, 0x55, 0x39, 0xc0, 0x1c, 0x66, 0x7f, 0xd5, 0x82, 0x13, 0x69, + 0xf2, 0xe8, 0x75, 0x0b, 0x4e, 0x46, 0x69, 0x7a, 0x47, 0x35, 0x76, 0x2a, 0x34, 0xbf, 0x0b, 0x84, + 0xbb, 0x3b, 0x61, 0xff, 0x8d, 0x98, 0xfc, 0x37, 0x5c, 0xbf, 0x1e, 0xdc, 0x52, 0x26, 0x90, 0xd5, + 0xd3, 0x04, 0xa2, 0x2b, 0xbf, 0xb6, 0x49, 0xea, 0x6d, 0xaf, 0xab, 0x66, 0x42, 0x55, 0xb4, 0x63, + 0x85, 0xc1, 0x52, 0xc4, 0xdb, 0x62, 0x4b, 0x9a, 0x9a, 0x94, 0xf3, 0xa2, 0x1d, 0x2b, 0x0c, 0xf4, + 0x2c, 0x8c, 0x18, 0x2f, 0x29, 0xe7, 0x25, 0xdb, 0x4f, 0x18, 0xca, 0x39, 0xc2, 0x09, 0x2c, 0x34, + 0x05, 0xa0, 0xcc, 0x29, 0xa9, 0x8c, 0x99, 0x0f, 0x5d, 0xc9, 0xbc, 0x08, 0x1b, 0x18, 0xac, 0x20, + 0x83, 0xd7, 0x8e, 0xd8, 0x21, 0xf1, 0xa0, 0x2e, 0x98, 0x3e, 0x27, 0xda, 0xb0, 0x82, 0x52, 0xb9, + 0xd5, 0x74, 0xfc, 0xb6, 0xe3, 0xd1, 0x11, 0x12, 0x5e, 0x31, 0xb5, 0x0c, 0x97, 0x15, 0x04, 0x1b, + 0x58, 0xf4, 0x8d, 0x63, 0xb7, 0x49, 0x5e, 0x0a, 0x7c, 0x19, 0x52, 0xad, 0xe3, 0x06, 0x44, 0x3b, + 0x56, 0x18, 0xf6, 0x5f, 0x5a, 0x30, 0xae, 0x2b, 0xc1, 0x30, 0x5f, 0x56, 0xc2, 0x89, 0x67, 0xed, + 0xeb, 0xc4, 0x4b, 0xd6, 0xbd, 0x28, 0xf4, 0x55, 0xf7, 0xc2, 0x2c, 0x49, 0x51, 0xdc, 0xb3, 0x24, + 0xc5, 0x0f, 0xc2, 0xd0, 0x16, 0xe9, 0x18, 0xb5, 
0x2b, 0x86, 0xa9, 0x39, 0x74, 0x85, 0x37, 0x61, + 0x09, 0x43, 0x36, 0x0c, 0xd6, 0x1c, 0x55, 0x31, 0x6d, 0x84, 0x6f, 0x7d, 0xe6, 0x66, 0x18, 0x92, + 0x80, 0xd8, 0x2b, 0x50, 0x51, 0xc7, 0xe7, 0xd2, 0xa7, 0x66, 0x65, 0xfb, 0xd4, 0xfa, 0x4a, 0x8d, + 0x9f, 0x5d, 0xff, 0xfa, 0x77, 0x1f, 0x7b, 0xd3, 0x1f, 0x7d, 0xf7, 0xb1, 0x37, 0x7d, 0xfb, 0xbb, + 0x8f, 0xbd, 0xe9, 0x43, 0xb7, 0x1f, 0xb3, 0xbe, 0x7e, 0xfb, 0x31, 0xeb, 0x8f, 0x6e, 0x3f, 0x66, + 0x7d, 0xfb, 0xf6, 0x63, 0xd6, 0x77, 0x6e, 0x3f, 0x66, 0x7d, 0xf6, 0xbf, 0x3e, 0xf6, 0xa6, 0x97, + 0x32, 0x63, 0xea, 0xe9, 0x8f, 0xa7, 0x6b, 0xf5, 0xe9, 0xed, 0xf3, 0x2c, 0xac, 0x9b, 0x2e, 0xaf, + 0x69, 0x63, 0x4e, 0x4d, 0xcb, 0xe5, 0xf5, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa6, 0xfc, 0x44, + 0xf5, 0xc0, 0xf5, 0x00, 0x00, } func (m *AWSAuthConfig) Marshal() (dAtA []byte, err error) { @@ -7224,6 +7457,11 @@ func (m *ApplicationSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x72 i -= len(m.Ref) copy(dAtA[i:], m.Ref) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Ref))) @@ -7371,6 +7609,22 @@ func (m *ApplicationSourceHelm) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.SkipSchemaValidation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + i-- + if m.SkipTests { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 if len(m.APIVersions) > 0 { for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.APIVersions[iNdEx]) @@ -7861,6 +8115,18 @@ func (m *ApplicationSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SourceHydrator != nil { + { + size, err := m.SourceHydrator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } if len(m.Sources) > 0 { for iNdEx := 
len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { { @@ -7970,6 +8236,16 @@ func (m *ApplicationStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.SourceHydrator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 i -= len(m.ControllerNamespace) copy(dAtA[i:], m.ControllerNamespace) i = encodeVarintGenerated(dAtA, i, uint64(len(m.ControllerNamespace))) @@ -8658,6 +8934,19 @@ func (m *ClusterConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i -= len(m.ProxyUrl) + copy(dAtA[i:], m.ProxyUrl) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProxyUrl))) + i-- + dAtA[i] = 0x42 + i-- + if m.DisableCompression { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 if m.ExecProviderConfig != nil { { size, err := m.ExecProviderConfig.MarshalToSizedBuffer(dAtA[:i]) @@ -8730,6 +9019,14 @@ func (m *ClusterGenerator) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.FlatList { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 if len(m.Values) > 0 { keysForValues := make([]string, 0, len(m.Values)) for k := range m.Values { @@ -9170,6 +9467,44 @@ func (m *ConnectionState) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *DrySource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DrySource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DrySource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + i -= 
len(m.TargetRevision) + copy(dAtA[i:], m.TargetRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetRevision))) + i-- + dAtA[i] = 0x12 + i -= len(m.RepoURL) + copy(dAtA[i:], m.RepoURL) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RepoURL))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *DuckTypeGenerator) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -9668,6 +10003,18 @@ func (m *HealthStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.LastTransitionTime != nil { + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) @@ -9876,6 +10223,109 @@ func (m *HostResourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *HydrateOperation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HydrateOperation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HydrateOperation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.SourceHydrator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + i -= len(m.HydratedSHA) + copy(dAtA[i:], m.HydratedSHA) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HydratedSHA))) + i-- + dAtA[i] = 0x32 + i -= len(m.DrySHA) + copy(dAtA[i:], m.DrySHA) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DrySHA))) + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + 
copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0x1a + if m.FinishedAt != nil { + { + size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HydrateTo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HydrateTo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HydrateTo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TargetBranch) + copy(dAtA[i:], m.TargetBranch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetBranch))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + func (m *Info) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -13162,6 +13612,14 @@ func (m *ResourceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + i-- + if m.RequiresDeletionConfirmation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 i = encodeVarintGenerated(dAtA, i, uint64(m.SyncWave)) i-- dAtA[i] = 0x50 @@ -14171,6 +14629,151 @@ func (m *SignatureKey) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SourceHydrator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceHydrator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceHydrator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HydrateTo != nil { + { + size, err := m.HydrateTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.SyncSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.DrySource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceHydratorStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceHydratorStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceHydratorStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentOperation != nil { + { + size, err := m.CurrentOperation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.LastSuccessfulOperation != nil { + { + size, err := m.LastSuccessfulOperation.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} 
+ +func (m *SuccessfulHydrateOperation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SuccessfulHydrateOperation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SuccessfulHydrateOperation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.SourceHydrator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + i -= len(m.HydratedSHA) + copy(dAtA[i:], m.HydratedSHA) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HydratedSHA))) + i-- + dAtA[i] = 0x32 + i -= len(m.DrySHA) + copy(dAtA[i:], m.DrySHA) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DrySHA))) + i-- + dAtA[i] = 0x2a + return len(dAtA) - i, nil +} + func (m *SyncOperation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -14542,6 +15145,39 @@ func (m *SyncPolicyAutomated) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SyncSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SyncSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SyncSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetBranch) + copy(dAtA[i:], m.TargetBranch) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetBranch))) + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} + func (m *SyncStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -15628,6 +16264,8 @@ func (m *ApplicationSource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Ref) n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -15694,6 +16332,8 @@ func (m *ApplicationSourceHelm) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + n += 2 + n += 2 return n } @@ -15878,6 +16518,10 @@ func (m *ApplicationSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.SourceHydrator != nil { + l = m.SourceHydrator.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -15935,6 +16579,8 @@ func (m *ApplicationStatus) Size() (n int) { } l = len(m.ControllerNamespace) n += 1 + l + sovGenerated(uint64(l)) + l = m.SourceHydrator.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -16166,6 +16812,9 @@ func (m *ClusterConfig) Size() (n int) { l = m.ExecProviderConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } + n += 2 + l = len(m.ProxyUrl) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -16187,6 +16836,7 @@ func (m *ClusterGenerator) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } + n += 2 return n } @@ -16338,6 +16988,21 @@ func (m *ConnectionState) Size() (n int) { return n } +func (m *DrySource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RepoURL) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TargetRevision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DuckTypeGenerator) Size() (n int) { if m == nil { return 0 @@ -16528,6 +17193,10 @@ func (m *HealthStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Message) n += 1 + l + sovGenerated(uint64(l)) + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } return n } @@ -16606,6 +17275,42 @@ func (m *HostResourceInfo) Size() (n int) { return n } +func (m *HydrateOperation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.FinishedAt != nil { + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DrySHA) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HydratedSHA) + n += 1 + l + sovGenerated(uint64(l)) + l = m.SourceHydrator.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HydrateTo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetBranch) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *Info) Size() (n int) { if m == nil { return 0 @@ -17850,6 +18555,7 @@ func (m *ResourceStatus) Size() (n int) { n += 2 n += 2 n += 1 + sovGenerated(uint64(m.SyncWave)) + n += 2 return n } @@ -18192,6 +18898,55 @@ func (m *SignatureKey) Size() (n int) { return n } +func (m *SourceHydrator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DrySource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SyncSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.HydrateTo != nil { + l = m.HydrateTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SourceHydratorStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LastSuccessfulOperation != nil { + l = m.LastSuccessfulOperation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CurrentOperation != nil { + l = m.CurrentOperation.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SuccessfulHydrateOperation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DrySHA) + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.HydratedSHA) + n += 1 + l + sovGenerated(uint64(l)) + l = m.SourceHydrator.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *SyncOperation) Size() (n int) { if m == nil { return 0 @@ -18335,6 +19090,19 @@ func (m *SyncPolicyAutomated) Size() (n int) { return n } +func (m *SyncSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetBranch) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *SyncStatus) Size() (n int) { if m == nil { return 0 @@ -18996,6 +19764,7 @@ func (this *ApplicationSource) String() string { `Plugin:` + strings.Replace(this.Plugin.String(), "ApplicationSourcePlugin", "ApplicationSourcePlugin", 1) + `,`, `Chart:` + fmt.Sprintf("%v", this.Chart) + `,`, `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `}`, }, "") return s @@ -19041,6 +19810,8 @@ func (this *ApplicationSourceHelm) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `KubeVersion:` + fmt.Sprintf("%v", this.KubeVersion) + `,`, `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `SkipTests:` + fmt.Sprintf("%v", this.SkipTests) + `,`, + `SkipSchemaValidation:` + fmt.Sprintf("%v", this.SkipSchemaValidation) + `,`, `}`, }, "") return s @@ -19185,6 +19956,7 @@ func (this *ApplicationSpec) String() string { `Info:` + repeatedStringForInfo + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Sources:` + repeatedStringForSources + `,`, + `SourceHydrator:` + strings.Replace(this.SourceHydrator.String(), "SourceHydrator", "SourceHydrator", 1) + `,`, `}`, }, "") return s @@ -19222,6 +19994,7 @@ func (this *ApplicationStatus) String() string { `ResourceHealthSource:` + fmt.Sprintf("%v", this.ResourceHealthSource) + `,`, `SourceTypes:` + fmt.Sprintf("%v", this.SourceTypes) + `,`, `ControllerNamespace:` + fmt.Sprintf("%v", this.ControllerNamespace) + `,`, 
+ `SourceHydrator:` + strings.Replace(strings.Replace(this.SourceHydrator.String(), "SourceHydratorStatus", "SourceHydratorStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -19396,6 +20169,8 @@ func (this *ClusterConfig) String() string { `TLSClientConfig:` + strings.Replace(strings.Replace(this.TLSClientConfig.String(), "TLSClientConfig", "TLSClientConfig", 1), `&`, ``, 1) + `,`, `AWSAuthConfig:` + strings.Replace(this.AWSAuthConfig.String(), "AWSAuthConfig", "AWSAuthConfig", 1) + `,`, `ExecProviderConfig:` + strings.Replace(this.ExecProviderConfig.String(), "ExecProviderConfig", "ExecProviderConfig", 1) + `,`, + `DisableCompression:` + fmt.Sprintf("%v", this.DisableCompression) + `,`, + `ProxyUrl:` + fmt.Sprintf("%v", this.ProxyUrl) + `,`, `}`, }, "") return s @@ -19418,6 +20193,7 @@ func (this *ClusterGenerator) String() string { `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "ApplicationSetTemplate", "ApplicationSetTemplate", 1), `&`, ``, 1) + `,`, `Values:` + mapStringForValues + `,`, + `FlatList:` + fmt.Sprintf("%v", this.FlatList) + `,`, `}`, }, "") return s @@ -19534,6 +20310,18 @@ func (this *ConnectionState) String() string { }, "") return s } +func (this *DrySource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DrySource{`, + `RepoURL:` + fmt.Sprintf("%v", this.RepoURL) + `,`, + `TargetRevision:` + fmt.Sprintf("%v", this.TargetRevision) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} func (this *DuckTypeGenerator) String() string { if this == nil { return "nil" @@ -19699,6 +20487,7 @@ func (this *HealthStatus) String() string { s := strings.Join([]string{`&HealthStatus{`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastTransitionTime:` + 
strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1) + `,`, `}`, }, "") return s @@ -19766,6 +20555,32 @@ func (this *HostResourceInfo) String() string { }, "") return s } +func (this *HydrateOperation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HydrateOperation{`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v1.Time", 1) + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `DrySHA:` + fmt.Sprintf("%v", this.DrySHA) + `,`, + `HydratedSHA:` + fmt.Sprintf("%v", this.HydratedSHA) + `,`, + `SourceHydrator:` + strings.Replace(strings.Replace(this.SourceHydrator.String(), "SourceHydrator", "SourceHydrator", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HydrateTo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HydrateTo{`, + `TargetBranch:` + fmt.Sprintf("%v", this.TargetBranch) + `,`, + `}`, + }, "") + return s +} func (this *Info) String() string { if this == nil { return "nil" @@ -20711,6 +21526,7 @@ func (this *ResourceStatus) String() string { `Hook:` + fmt.Sprintf("%v", this.Hook) + `,`, `RequiresPruning:` + fmt.Sprintf("%v", this.RequiresPruning) + `,`, `SyncWave:` + fmt.Sprintf("%v", this.SyncWave) + `,`, + `RequiresDeletionConfirmation:` + fmt.Sprintf("%v", this.RequiresDeletionConfirmation) + `,`, `}`, }, "") return s @@ -20940,6 +21756,41 @@ func (this *SignatureKey) String() string { }, "") return s } +func (this *SourceHydrator) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceHydrator{`, + `DrySource:` + strings.Replace(strings.Replace(this.DrySource.String(), "DrySource", "DrySource", 1), `&`, ``, 1) + `,`, + `SyncSource:` + 
strings.Replace(strings.Replace(this.SyncSource.String(), "SyncSource", "SyncSource", 1), `&`, ``, 1) + `,`, + `HydrateTo:` + strings.Replace(this.HydrateTo.String(), "HydrateTo", "HydrateTo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SourceHydratorStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceHydratorStatus{`, + `LastSuccessfulOperation:` + strings.Replace(this.LastSuccessfulOperation.String(), "SuccessfulHydrateOperation", "SuccessfulHydrateOperation", 1) + `,`, + `CurrentOperation:` + strings.Replace(this.CurrentOperation.String(), "HydrateOperation", "HydrateOperation", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SuccessfulHydrateOperation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SuccessfulHydrateOperation{`, + `DrySHA:` + fmt.Sprintf("%v", this.DrySHA) + `,`, + `HydratedSHA:` + fmt.Sprintf("%v", this.HydratedSHA) + `,`, + `SourceHydrator:` + strings.Replace(strings.Replace(this.SourceHydrator.String(), "SourceHydrator", "SourceHydrator", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *SyncOperation) String() string { if this == nil { return "nil" @@ -21033,6 +21884,17 @@ func (this *SyncPolicyAutomated) String() string { }, "") return s } +func (this *SyncSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SyncSource{`, + `TargetBranch:` + fmt.Sprintf("%v", this.TargetBranch) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} func (this *SyncStatus) String() string { if this == nil { return "nil" @@ -27134,6 +27996,38 @@ func (m *ApplicationSource) Unmarshal(dAtA []byte) error { } m.Ref = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -27739,6 +28633,46 @@ func (m *ApplicationSourceHelm) Unmarshal(dAtA []byte) error { } m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipTests", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipTests = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipSchemaValidation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipSchemaValidation = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -29243,6 +30177,42 @@ func (m *ApplicationSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceHydrator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF 
+ } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceHydrator == nil { + m.SourceHydrator = &SourceHydrator{} + } + if err := m.SourceHydrator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -29730,61 +30700,11 @@ func (m *ApplicationStatus) Unmarshal(dAtA []byte) error { } m.ControllerNamespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ApplicationSummary) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ApplicationSummary: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationSummary: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 14: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalURLs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceHydrator", wireType) } - var stringLen uint64 + var 
msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29794,55 +30714,24 @@ func (m *ApplicationSummary) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ExternalURLs = append(m.ExternalURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.SourceHydrator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.Images = append(m.Images, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -29865,7 +30754,7 @@ func (m *ApplicationSummary) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationTree) Unmarshal(dAtA []byte) error { +func (m *ApplicationSummary) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29888,17 +30777,17 @@ func (m *ApplicationTree) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ApplicationTree: wiretype end group for non-group") + 
return fmt.Errorf("proto: ApplicationSummary: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationTree: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplicationSummary: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExternalURLs", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29908,65 +30797,29 @@ func (m *ApplicationTree) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Nodes = append(m.Nodes, ResourceNode{}) - if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ExternalURLs = append(m.ExternalURLs, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OrphanedNodes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OrphanedNodes = append(m.OrphanedNodes, ResourceNode{}) - if err := 
m.OrphanedNodes[len(m.OrphanedNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29976,45 +30829,24 @@ func (m *ApplicationTree) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Hosts = append(m.Hosts, HostInfo{}) - if err := m.Hosts[len(m.Hosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Images = append(m.Images, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ShardsCount", wireType) - } - m.ShardsCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ShardsCount |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30036,7 +30868,7 @@ func (m *ApplicationTree) Unmarshal(dAtA []byte) error { } return nil } -func (m *ApplicationWatchEvent) Unmarshal(dAtA []byte) error { +func (m *ApplicationTree) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30059,47 +30891,218 @@ func (m *ApplicationWatchEvent) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) 
if wireType == 4 { - return fmt.Errorf("proto: ApplicationWatchEvent: wiretype end group for non-group") + return fmt.Errorf("proto: ApplicationTree: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ApplicationWatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ApplicationTree: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = k8s_io_apimachinery_pkg_watch.EventType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Application", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, ResourceNode{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field OrphanedNodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OrphanedNodes = append(m.OrphanedNodes, ResourceNode{}) + if err := m.OrphanedNodes[len(m.OrphanedNodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, HostInfo{}) + if err := m.Hosts[len(m.Hosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardsCount", wireType) + } + m.ShardsCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ShardsCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { 
+ return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ApplicationWatchEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ApplicationWatchEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ApplicationWatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = k8s_io_apimachinery_pkg_watch.EventType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Application", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -31714,6 +32717,58 @@ func (m *ClusterConfig) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
DisableCompression", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DisableCompression = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProxyUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProxyUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31957,6 +33012,26 @@ func (m *ClusterGenerator) Unmarshal(dAtA []byte) error { } m.Values[mapkey] = mapvalue iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FlatList", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FlatList = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33173,6 +34248,152 @@ func (m *ConnectionState) Unmarshal(dAtA []byte) error { } return nil } +func (m *DrySource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated 
+ } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DrySource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DrySource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RepoURL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RepoURL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DuckTypeGenerator) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -34929,15 +36150,361 @@ func (m *HealthStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HealthStatus: wiretype end group for non-group") + return fmt.Errorf("proto: HealthStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + m.Status = github_com_argoproj_gitops_engine_pkg_health.HealthStatusCode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTransitionTime == nil { + m.LastTransitionTime = &v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HelmFileParameter) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HelmFileParameter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HelmFileParameter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HelmOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HelmOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HelmOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValuesFileSchemes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValuesFileSchemes = append(m.ValuesFileSchemes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HelmParameter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HelmParameter: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HealthStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HelmParameter: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34965,11 +36532,11 @@ func (m *HealthStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = github_com_argoproj_gitops_engine_pkg_health.HealthStatusCode(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34997,8 +36564,28 @@ func (m *HealthStatus) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ForceString", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForceString = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -35020,7 +36607,7 @@ func (m *HealthStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *HelmFileParameter) Unmarshal(dAtA []byte) error { +func (m *HostInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35043,10 +36630,10 @@ func (m *HelmFileParameter) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HelmFileParameter: wiretype end group for non-group") + return fmt.Errorf("proto: HostInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HelmFileParameter: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -35083,9 +36670,9 @@ func (m *HelmFileParameter) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourcesInfo", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35095,23 +36682,58 @@ func (m *HelmFileParameter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := 
iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.ResourcesInfo = append(m.ResourcesInfo, HostResourceInfo{}) + if err := m.ResourcesInfo[len(m.ResourcesInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SystemInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -35134,7 +36756,7 @@ func (m *HelmFileParameter) Unmarshal(dAtA []byte) error { } return nil } -func (m *HelmOptions) Unmarshal(dAtA []byte) error { +func (m *HostResourceInfo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35157,15 +36779,15 @@ func (m *HelmOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HelmOptions: wiretype end group for non-group") + return fmt.Errorf("proto: HostResourceInfo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HelmOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HostResourceInfo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValuesFileSchemes", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field ResourceName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35193,8 +36815,65 @@ func (m *HelmOptions) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ValuesFileSchemes = append(m.ValuesFileSchemes, string(dAtA[iNdEx:postIndex])) + m.ResourceName = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestedByApp", wireType) + } + m.RequestedByApp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RequestedByApp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestedByNeighbors", wireType) + } + m.RequestedByNeighbors = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RequestedByNeighbors |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + m.Capacity = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Capacity |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -35216,7 +36895,7 @@ func (m *HelmOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *HelmParameter) Unmarshal(dAtA []byte) error { +func (m *HydrateOperation) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35239,17 +36918,17 @@ func (m *HelmParameter) Unmarshal(dAtA []byte) error { 
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HelmParameter: wiretype end group for non-group") + return fmt.Errorf("proto: HydrateOperation: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HelmParameter: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HydrateOperation: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35259,27 +36938,64 @@ func (m *HelmParameter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FinishedAt == nil { + m.FinishedAt = &v1.Time{} + } + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35307,13 +37023,13 @@ func (m *HelmParameter) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + m.Phase = HydrateOperationPhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ForceString", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35323,65 +37039,27 @@ func (m *HelmParameter) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - m.ForceString = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DrySHA", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35409,13 +37087,13 @@ func (m *HostInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.DrySHA = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourcesInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HydratedSHA", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35425,29 +37103,27 @@ func (m *HostInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthGenerated } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourcesInfo = append(m.ResourcesInfo, HostResourceInfo{}) - if err := m.ResourcesInfo[len(m.ResourcesInfo)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.HydratedSHA = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 7: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SystemInfo", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SourceHydrator", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35474,7 +37150,7 @@ func (m *HostInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.SystemInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.SourceHydrator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -35499,7 +37175,7 @@ func (m *HostInfo) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostResourceInfo) Unmarshal(dAtA []byte) error { +func (m *HydrateTo) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35522,15 +37198,15 @@ func (m *HostResourceInfo) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HostResourceInfo: wiretype end group for non-group") + return fmt.Errorf("proto: HydrateTo: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HostResourceInfo: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HydrateTo: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetBranch", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35558,65 +37234,8 @@ func (m *HostResourceInfo) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceName = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + m.TargetBranch = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestedByApp", wireType) - } - m.RequestedByApp = 0 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RequestedByApp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestedByNeighbors", wireType) - } - m.RequestedByNeighbors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RequestedByNeighbors |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) - } - m.Capacity = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Capacity |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -47192,6 +48811,26 @@ func (m *ResourceStatus) Unmarshal(dAtA []byte) error { break } } + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiresDeletionConfirmation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequiresDeletionConfirmation = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -50222,15 +51861,371 @@ func (m *SignatureKey) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SignatureKey: wiretype end group for non-group") + return fmt.Errorf("proto: SignatureKey: wiretype end group for non-group") + 
} + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureKey: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KeyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KeyID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceHydrator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceHydrator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceHydrator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DrySource", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DrySource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SyncSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SyncSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HydrateTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HydrateTo == nil { + m.HydrateTo = &HydrateTo{} + } + if err := m.HydrateTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceHydratorStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceHydratorStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SignatureKey: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SourceHydratorStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KeyID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastSuccessfulOperation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastSuccessfulOperation == nil { + m.LastSuccessfulOperation = &SuccessfulHydrateOperation{} + } + if err := m.LastSuccessfulOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentOperation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CurrentOperation == nil { + m.CurrentOperation = &HydrateOperation{} + } + if err := m.CurrentOperation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SuccessfulHydrateOperation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SuccessfulHydrateOperation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SuccessfulHydrateOperation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DrySHA", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ 
-50258,7 +52253,72 @@ func (m *SignatureKey) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.KeyID = string(dAtA[iNdEx:postIndex]) + m.DrySHA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HydratedSHA", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HydratedSHA = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceHydrator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SourceHydrator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -51387,6 +53447,120 @@ func (m *SyncPolicyAutomated) Unmarshal(dAtA []byte) error { } return nil } +func (m *SyncSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SyncSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SyncSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetBranch", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetBranch = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *SyncStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto index 9d3d22e68..6823a7626 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/generated.proto @@ -454,6 +454,9 @@ message ApplicationSource { // Ref is reference to another source within sources field. This field will not be used if used with a `source` tag. optional string ref = 13; + + // Name is used to refer to a source and is displayed in the UI. It is used in multi-source Applications. + optional string name = 14; } // ApplicationSourceDirectory holds options for applications of type plain YAML or Jsonnet @@ -515,6 +518,12 @@ message ApplicationSourceHelm { // APIVersions specifies the Kubernetes resource API versions to pass to Helm when templating manifests. By default, // Argo CD uses the API versions of the target cluster. The format is [group/]version/kind. repeated string apiVersions = 13; + + // SkipTests skips test manifest installation step (Helm's --skip-tests). + optional bool skipTests = 14; + + // SkipSchemaValidation skips JSON schema validation (Helm's --skip-schema-validation) + optional bool skipSchemaValidation = 15; } // ApplicationSourceJsonnet holds options specific to applications of type Jsonnet @@ -635,6 +644,9 @@ message ApplicationSpec { // Sources is a reference to the location of the application's manifests or chart repeated ApplicationSource sources = 8; + + // SourceHydrator provides a way to push hydrated manifests back to git before syncing them to the cluster. 
+ optional SourceHydrator sourceHydrator = 9; } // ApplicationStatus contains status information for the application @@ -678,6 +690,9 @@ message ApplicationStatus { // ControllerNamespace indicates the namespace in which the application controller is located optional string controllerNamespace = 13; + + // SourceHydrator stores information about the current state of source hydration + optional SourceHydratorStatus sourceHydrator = 14; } // ApplicationSummary contains information about URLs and container images used by an application @@ -838,6 +853,12 @@ message ClusterConfig { // ExecProviderConfig contains configuration for an exec provider optional ExecProviderConfig execProviderConfig = 6; + + // DisableCompression bypasses automatic GZip compression requests to the server. + optional bool disableCompression = 7; + + // ProxyURL is the URL to the proxy to be used for all requests send to the server + optional string proxyUrl = 8; } // ClusterGenerator defines a generator to match against clusters registered with ArgoCD. @@ -851,6 +872,9 @@ message ClusterGenerator { // Values contains key/value pairs which are passed directly as parameters to the template map values = 3; + + // returns the clusters a single 'clusters' value in the template + optional bool flatList = 4; } // ClusterInfo contains information about the cluster @@ -939,6 +963,18 @@ message ConnectionState { optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time attemptedAt = 3; } +// DrySource specifies a location for dry "don't repeat yourself" manifest source information. 
+message DrySource { + // RepoURL is the URL to the git repository that contains the application manifests + optional string repoURL = 1; + + // TargetRevision defines the revision of the source to hydrate + optional string targetRevision = 2; + + // Path is a directory path within the Git repository where the manifests are located + optional string path = 3; +} + // DuckType defines a generator to match against clusters registered with ArgoCD. message DuckTypeGenerator { // ConfigMapRef is a ConfigMap with the duck type definitions needed to retrieve the data @@ -1054,6 +1090,9 @@ message HealthStatus { // Message is a human-readable informational message describing the health status optional string message = 2; + + // LastTransitionTime is the time the HealthStatus was set or updated + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; } // HelmFileParameter is a file parameter that's passed to helm template during manifest generation @@ -1104,6 +1143,37 @@ message HostResourceInfo { optional int64 capacity = 4; } +// HydrateOperation contains information about the most recent hydrate operation +message HydrateOperation { + // StartedAt indicates when the hydrate operation started + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; + + // FinishedAt indicates when the hydrate operation finished + optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 2; + + // Phase indicates the status of the hydrate operation + optional string phase = 3; + + // Message contains a message describing the current status of the hydrate operation + optional string message = 4; + + // DrySHA holds the resolved revision (sha) of the dry source as of the most recent reconciliation + optional string drySHA = 5; + + // HydratedSHA holds the resolved revision (sha) of the hydrated source as of the most recent reconciliation + optional string hydratedSHA = 6; + + // SourceHydrator holds the hydrator config used for the hydrate operation + 
optional SourceHydrator sourceHydrator = 7; +} + +// HydrateTo specifies a location to which hydrated manifests should be pushed as a "staging area" before being moved to +// the SyncSource. The RepoURL and Path are assumed based on the associated SyncSource config in the SourceHydrator. +message HydrateTo { + // TargetBranch is the branch to which hydrated manifests should be committed + optional string targetBranch = 1; +} + message Info { optional string name = 1; @@ -1956,6 +2026,8 @@ message ResourceStatus { optional bool requiresPruning = 9; optional int64 syncWave = 10; + + optional bool requiresDeletionConfirmation = 11; } // RetryStrategy contains information about the strategy to apply when a sync failed @@ -2221,6 +2293,41 @@ message SignatureKey { optional string keyID = 1; } +// SourceHydrator specifies a dry "don't repeat yourself" source for manifests, a sync source from which to sync +// hydrated manifests, and an optional hydrateTo location to act as a "staging" aread for hydrated manifests. +message SourceHydrator { + // DrySource specifies where the dry "don't repeat yourself" manifest source lives. + optional DrySource drySource = 1; + + // SyncSource specifies where to sync hydrated manifests from. + optional SyncSource syncSource = 2; + + // HydrateTo specifies an optional "staging" location to push hydrated manifests to. An external system would then + // have to move manifests to the SyncSource, e.g. by pull request. 
+ optional HydrateTo hydrateTo = 3; +} + +// SourceHydratorStatus contains information about the current state of source hydration +message SourceHydratorStatus { + // LastSuccessfulOperation holds info about the most recent successful hydration + optional SuccessfulHydrateOperation lastSuccessfulOperation = 1; + + // CurrentOperation holds the status of the hydrate operation + optional HydrateOperation currentOperation = 2; +} + +// SuccessfulHydrateOperation contains information about the most recent successful hydrate operation +message SuccessfulHydrateOperation { + // DrySHA holds the resolved revision (sha) of the dry source as of the most recent reconciliation + optional string drySHA = 5; + + // HydratedSHA holds the resolved revision (sha) of the hydrated source as of the most recent reconciliation + optional string hydratedSHA = 6; + + // SourceHydrator holds the hydrator config used for the hydrate operation + optional SourceHydrator sourceHydrator = 7; +} + // SyncOperation contains details about a sync operation. message SyncOperation { // Revision is the revision (Git) or chart version (Helm) which to sync the application to @@ -2320,6 +2427,17 @@ message SyncPolicyAutomated { optional bool allowEmpty = 3; } +// SyncSource specifies a location from which hydrated manifests may be synced. RepoURL is assumed based on the +// associated DrySource config in the SourceHydrator. +message SyncSource { + // TargetBranch is the branch to which hydrated manifests should be committed + optional string targetBranch = 1; + + // Path is a directory path within the git repository where hydrated manifests should be committed to and synced + // from. If hydrateTo is set, this is just the path from which hydrated manifests will be synced. 
+ optional string path = 2; +} + // SyncStatus contains information about the currently observed live and desired states of an application message SyncStatus { // Status is the sync state of the comparison diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go index 1b2533532..28492686a 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/openapi_generated.go @@ -72,6 +72,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ConfigManagementPlugin": schema_pkg_apis_application_v1alpha1_ConfigManagementPlugin(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ConfigMapKeyRef": schema_pkg_apis_application_v1alpha1_ConfigMapKeyRef(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ConnectionState": schema_pkg_apis_application_v1alpha1_ConnectionState(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DrySource": schema_pkg_apis_application_v1alpha1_DrySource(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DuckTypeGenerator": schema_pkg_apis_application_v1alpha1_DuckTypeGenerator(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.EnvEntry": schema_pkg_apis_application_v1alpha1_EnvEntry(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ErrApplicationNotAllowedToUseProject": schema_pkg_apis_application_v1alpha1_ErrApplicationNotAllowedToUseProject(ref), @@ -87,6 +88,8 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HelmParameter": schema_pkg_apis_application_v1alpha1_HelmParameter(ref), 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HostInfo": schema_pkg_apis_application_v1alpha1_HostInfo(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HostResourceInfo": schema_pkg_apis_application_v1alpha1_HostResourceInfo(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HydrateOperation": schema_pkg_apis_application_v1alpha1_HydrateOperation(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HydrateTo": schema_pkg_apis_application_v1alpha1_HydrateTo(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Info": schema_pkg_apis_application_v1alpha1_Info(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.InfoItem": schema_pkg_apis_application_v1alpha1_InfoItem(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.JWTToken": schema_pkg_apis_application_v1alpha1_JWTToken(ref), @@ -158,11 +161,15 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SCMProviderGeneratorGitlab": schema_pkg_apis_application_v1alpha1_SCMProviderGeneratorGitlab(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SecretRef": schema_pkg_apis_application_v1alpha1_SecretRef(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SignatureKey": schema_pkg_apis_application_v1alpha1_SignatureKey(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydrator": schema_pkg_apis_application_v1alpha1_SourceHydrator(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydratorStatus": schema_pkg_apis_application_v1alpha1_SourceHydratorStatus(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SuccessfulHydrateOperation": schema_pkg_apis_application_v1alpha1_SuccessfulHydrateOperation(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperation": 
schema_pkg_apis_application_v1alpha1_SyncOperation(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperationResource": schema_pkg_apis_application_v1alpha1_SyncOperationResource(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncOperationResult": schema_pkg_apis_application_v1alpha1_SyncOperationResult(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncPolicy": schema_pkg_apis_application_v1alpha1_SyncPolicy(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncPolicyAutomated": schema_pkg_apis_application_v1alpha1_SyncPolicyAutomated(ref), + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncSource": schema_pkg_apis_application_v1alpha1_SyncSource(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStatus": schema_pkg_apis_application_v1alpha1_SyncStatus(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategy": schema_pkg_apis_application_v1alpha1_SyncStrategy(ref), "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStrategyApply": schema_pkg_apis_application_v1alpha1_SyncStrategyApply(ref), @@ -2317,12 +2324,18 @@ func schema_pkg_apis_application_v1alpha1_ApplicationSpec(ref common.ReferenceCa }, }, }, + "sourceHydrator": { + SchemaProps: spec.SchemaProps{ + Description: "SourceHydrator provides a way to push hydrated manifests back to git before syncing them to the cluster.", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydrator"), + }, + }, }, Required: []string{"destination", "project"}, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationDestination", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Info", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceIgnoreDifferences", 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncPolicy"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationDestination", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSource", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.Info", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceIgnoreDifferences", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydrator", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncPolicy"}, } } @@ -2450,11 +2463,18 @@ func schema_pkg_apis_application_v1alpha1_ApplicationStatus(ref common.Reference Format: "", }, }, + "sourceHydrator": { + SchemaProps: spec.SchemaProps{ + Description: "SourceHydrator stores information about the current state of source hydration", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydratorStatus"), + }, + }, }, }, }, Dependencies: []string{ - "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationCondition", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSummary", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HealthStatus", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationState", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceStatus", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RevisionHistory", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationCondition", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ApplicationSummary", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HealthStatus", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.OperationState", 
"github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.ResourceStatus", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.RevisionHistory", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydratorStatus", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -2997,6 +3017,13 @@ func schema_pkg_apis_application_v1alpha1_ClusterGenerator(ref common.ReferenceC }, }, }, + "flatList": { + SchemaProps: spec.SchemaProps{ + Description: "returns the clusters a single 'clusters' value in the template", + Type: []string{"boolean"}, + Format: "", + }, + }, }, }, }, @@ -3339,6 +3366,44 @@ func schema_pkg_apis_application_v1alpha1_ConnectionState(ref common.ReferenceCa } } +func schema_pkg_apis_application_v1alpha1_DrySource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DrySource specifies a location for dry \"don't repeat yourself\" manifest source information.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "repoURL": { + SchemaProps: spec.SchemaProps{ + Description: "RepoURL is the URL to the git repository that contains the application manifests", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "targetRevision": { + SchemaProps: spec.SchemaProps{ + Description: "TargetRevision defines the revision of the source to hydrate", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is a directory path within the Git repository where the manifests are located", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"repoURL", "targetRevision", "path"}, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_DuckTypeGenerator(ref common.ReferenceCallback) common.OpenAPIDefinition { return 
common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -3780,9 +3845,17 @@ func schema_pkg_apis_application_v1alpha1_HealthStatus(ref common.ReferenceCallb Format: "", }, }, + "lastTransitionTime": { + SchemaProps: spec.SchemaProps{ + Description: "LastTransitionTime is the time the HealthStatus was set", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, }, }, }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, } } @@ -3951,6 +4024,93 @@ func schema_pkg_apis_application_v1alpha1_HostResourceInfo(ref common.ReferenceC } } +func schema_pkg_apis_application_v1alpha1_HydrateOperation(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HydrateOperation contains information about the most recent hydrate operation", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "startedAt": { + SchemaProps: spec.SchemaProps{ + Description: "StartedAt indicates when the hydrate operation started", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "finishedAt": { + SchemaProps: spec.SchemaProps{ + Description: "FinishedAt indicates when the hydrate operation finished", + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Time"), + }, + }, + "phase": { + SchemaProps: spec.SchemaProps{ + Description: "Phase indicates the status of the hydrate operation", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "message": { + SchemaProps: spec.SchemaProps{ + Description: "Message contains a message describing the current status of the hydrate operation", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "drySHA": { + SchemaProps: spec.SchemaProps{ + Description: "DrySHA holds the resolved revision (sha) of the dry source as of the most recent reconciliation", + Type: []string{"string"}, + Format: "", + }, + }, + "hydratedSHA": { + SchemaProps: spec.SchemaProps{ + Description: 
"HydratedSHA holds the resolved revision (sha) of the hydrated source as of the most recent reconciliation", + Type: []string{"string"}, + Format: "", + }, + }, + "sourceHydrator": { + SchemaProps: spec.SchemaProps{ + Description: "SourceHydrator holds the hydrator config used for the hydrate operation", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydrator"), + }, + }, + }, + Required: []string{"phase", "message"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydrator", "k8s.io/apimachinery/pkg/apis/meta/v1.Time"}, + } +} + +func schema_pkg_apis_application_v1alpha1_HydrateTo(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "HydrateTo specifies a location to which hydrated manifests should be pushed as a \"staging area\" before being moved to the SyncSource. 
The RepoURL and Path are assumed based on the associated SyncSource config in the SourceHydrator.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "targetBranch": { + SchemaProps: spec.SchemaProps{ + Description: "TargetBranch is the branch to which hydrated manifests should be committed", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"targetBranch"}, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_Info(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -7636,6 +7796,105 @@ func schema_pkg_apis_application_v1alpha1_SignatureKey(ref common.ReferenceCallb } } +func schema_pkg_apis_application_v1alpha1_SourceHydrator(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SourceHydrator specifies a dry \"don't repeat yourself\" source for manifests, a sync source from which to sync hydrated manifests, and an optional hydrateTo location to act as a \"staging\" aread for hydrated manifests.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "drySource": { + SchemaProps: spec.SchemaProps{ + Description: "DrySource specifies where the dry \"don't repeat yourself\" manifest source lives.", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DrySource"), + }, + }, + "syncSource": { + SchemaProps: spec.SchemaProps{ + Description: "SyncSource specifies where to sync hydrated manifests from.", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncSource"), + }, + }, + "hydrateTo": { + SchemaProps: spec.SchemaProps{ + Description: "HydrateTo specifies an optional \"staging\" location to push hydrated manifests to. An external system would then have to move manifests to the SyncSource, e.g. 
by pull request.", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HydrateTo"), + }, + }, + }, + Required: []string{"drySource", "syncSource"}, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.DrySource", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HydrateTo", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SyncSource"}, + } +} + +func schema_pkg_apis_application_v1alpha1_SourceHydratorStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SourceHydratorStatus contains information about the current state of source hydration", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "lastSuccessfulOperation": { + SchemaProps: spec.SchemaProps{ + Description: "LastSuccessfulOperation holds info about the most recent successful hydration", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SuccessfulHydrateOperation"), + }, + }, + "currentOperation": { + SchemaProps: spec.SchemaProps{ + Description: "CurrentOperation holds the status of the hydrate operation", + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HydrateOperation"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.HydrateOperation", "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SuccessfulHydrateOperation"}, + } +} + +func schema_pkg_apis_application_v1alpha1_SuccessfulHydrateOperation(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SuccessfulHydrateOperation contains information about the most recent successful hydrate operation", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "drySHA": { + SchemaProps: 
spec.SchemaProps{ + Description: "DrySHA holds the resolved revision (sha) of the dry source as of the most recent reconciliation", + Type: []string{"string"}, + Format: "", + }, + }, + "hydratedSHA": { + SchemaProps: spec.SchemaProps{ + Description: "HydratedSHA holds the resolved revision (sha) of the hydrated source as of the most recent reconciliation", + Type: []string{"string"}, + Format: "", + }, + }, + "sourceHydrator": { + SchemaProps: spec.SchemaProps{ + Description: "SourceHydrator holds the hydrator config used for the hydrate operation", + Default: map[string]interface{}{}, + Ref: ref("github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydrator"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1.SourceHydrator"}, + } +} + func schema_pkg_apis_application_v1alpha1_SyncOperation(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -7958,6 +8217,36 @@ func schema_pkg_apis_application_v1alpha1_SyncPolicyAutomated(ref common.Referen } } +func schema_pkg_apis_application_v1alpha1_SyncSource(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "SyncSource specifies a location from which hydrated manifests may be synced. RepoURL is assumed based on the associated DrySource config in the SourceHydrator.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "targetBranch": { + SchemaProps: spec.SchemaProps{ + Description: "TargetBranch is the branch to which hydrated manifests should be committed", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "path": { + SchemaProps: spec.SchemaProps{ + Description: "Path is a directory path within the git repository where hydrated manifests should be committed to and synced from. 
If hydrateTo is set, this is just the path from which hydrated manifests will be synced.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"targetBranch", "path"}, + }, + }, + } +} + func schema_pkg_apis_application_v1alpha1_SyncStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/repository_types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/repository_types.go index 5a30d24fb..047ae14b1 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/repository_types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/repository_types.go @@ -5,6 +5,7 @@ import ( "net/url" "strings" + "github.com/argoproj/argo-cd/v2/common" "github.com/argoproj/argo-cd/v2/util/cert" "github.com/argoproj/argo-cd/v2/util/git" "github.com/argoproj/argo-cd/v2/util/helm" @@ -283,6 +284,34 @@ func (m *Repository) StringForLogging() string { return fmt.Sprintf("&Repository{Repo: %q, Type: %q, Name: %q, Project: %q}", m.Repo, m.Type, m.Name, m.Project) } +// Sanitized returns a copy of the Repository with sensitive information removed. 
+func (m *Repository) Sanitized() *Repository { + return &Repository{ + Repo: m.Repo, + Type: m.Type, + Name: m.Name, + Username: m.Username, + Insecure: m.IsInsecure(), + EnableLFS: m.EnableLFS, + EnableOCI: m.EnableOCI, + Proxy: m.Proxy, + NoProxy: m.NoProxy, + Project: m.Project, + ForceHttpBasicAuth: m.ForceHttpBasicAuth, + InheritedCreds: m.InheritedCreds, + GithubAppId: m.GithubAppId, + GithubAppInstallationId: m.GithubAppInstallationId, + GitHubAppEnterpriseBaseURL: m.GitHubAppEnterpriseBaseURL, + } +} + +func (m *Repository) Normalize() *Repository { + if m.Type == "" { + m.Type = common.DefaultRepoType + } + return m +} + // Repositories defines a list of Repository configurations type Repositories []*Repository diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go index c656db1fe..524f574f8 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/types.go @@ -2,10 +2,13 @@ package v1alpha1 import ( "encoding/json" + "errors" "fmt" + "maps" "math" "net" "net/http" + "net/url" "os" "path/filepath" "reflect" @@ -18,11 +21,12 @@ import ( "github.com/argoproj/gitops-engine/pkg/health" synccommon "github.com/argoproj/gitops-engine/pkg/sync/common" + "github.com/argoproj/gitops-engine/pkg/utils/kube" "github.com/robfig/cron/v3" log "github.com/sirupsen/logrus" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" @@ -35,8 +39,9 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "sigs.k8s.io/yaml" + "github.com/argoproj/argo-cd/v2/util/rbac" + "github.com/argoproj/argo-cd/v2/common" - "github.com/argoproj/argo-cd/v2/util/collections" 
"github.com/argoproj/argo-cd/v2/util/env" "github.com/argoproj/argo-cd/v2/util/helm" utilhttp "github.com/argoproj/argo-cd/v2/util/http" @@ -84,6 +89,9 @@ type ApplicationSpec struct { // Sources is a reference to the location of the application's manifests or chart Sources ApplicationSources `json:"sources,omitempty" protobuf:"bytes,8,opt,name=sources"` + + // SourceHydrator provides a way to push hydrated manifests back to git before syncing them to the cluster. + SourceHydrator *SourceHydrator `json:"sourceHydrator,omitempty" protobuf:"bytes,9,opt,name=sourceHydrator"` } type IgnoreDifferences []ResourceIgnoreDifferences @@ -161,9 +169,8 @@ func (e Env) Envsubst(s string) string { // allow escaping $ with $$ if s == "$" { return "$" - } else { - return valByEnv[s] } + return valByEnv[s] }) } @@ -189,17 +196,19 @@ type ApplicationSource struct { Chart string `json:"chart,omitempty" protobuf:"bytes,12,opt,name=chart"` // Ref is reference to another source within sources field. This field will not be used if used with a `source` tag. Ref string `json:"ref,omitempty" protobuf:"bytes,13,opt,name=ref"` + // Name is used to refer to a source and is displayed in the UI. It is used in multi-source Applications. 
+ Name string `json:"name,omitempty" protobuf:"bytes,14,opt,name=name"` } // ApplicationSources contains list of required information about the sources of an application type ApplicationSources []ApplicationSource -func (s ApplicationSources) Equals(other ApplicationSources) bool { - if len(s) != len(other) { +func (a ApplicationSources) Equals(other ApplicationSources) bool { + if len(a) != len(other) { return false } - for i := range s { - if !s[i].Equals(&other[i]) { + for i := range a { + if !a[i].Equals(&other[i]) { return false } } @@ -211,128 +220,153 @@ func (a ApplicationSources) IsZero() bool { return len(a) == 0 } -func (a *ApplicationSpec) GetSource() ApplicationSource { +func (spec *ApplicationSpec) GetSource() ApplicationSource { + if spec.SourceHydrator != nil { + return spec.SourceHydrator.GetSyncSource() + } // if Application has multiple sources, return the first source in sources - if a.HasMultipleSources() { - return a.Sources[0] + if spec.HasMultipleSources() { + return spec.Sources[0] } - if a.Source != nil { - return *a.Source + if spec.Source != nil { + return *spec.Source } return ApplicationSource{} } -func (a *ApplicationSpec) GetSources() ApplicationSources { - if a.HasMultipleSources() { - return a.Sources +// GetHydrateToSource returns the hydrateTo source if it exists, otherwise returns the sync source. 
+func (spec *ApplicationSpec) GetHydrateToSource() ApplicationSource { + if spec.SourceHydrator != nil { + targetRevision := spec.SourceHydrator.SyncSource.TargetBranch + if spec.SourceHydrator.HydrateTo != nil { + targetRevision = spec.SourceHydrator.HydrateTo.TargetBranch + } + return ApplicationSource{ + RepoURL: spec.SourceHydrator.DrySource.RepoURL, + Path: spec.SourceHydrator.SyncSource.Path, + TargetRevision: targetRevision, + } } - if a.Source != nil { - return ApplicationSources{*a.Source} + return ApplicationSource{} +} + +func (spec *ApplicationSpec) GetSources() ApplicationSources { + if spec.SourceHydrator != nil { + return ApplicationSources{spec.SourceHydrator.GetSyncSource()} + } + if spec.HasMultipleSources() { + return spec.Sources + } + if spec.Source != nil { + return ApplicationSources{*spec.Source} } return ApplicationSources{} } -func (a *ApplicationSpec) HasMultipleSources() bool { - return len(a.Sources) > 0 +func (spec *ApplicationSpec) HasMultipleSources() bool { + return spec.SourceHydrator == nil && len(spec.Sources) > 0 } -func (a *ApplicationSpec) GetSourcePtrByPosition(sourcePosition int) *ApplicationSource { +func (spec *ApplicationSpec) GetSourcePtrByPosition(sourcePosition int) *ApplicationSource { // if Application has multiple sources, return the first source in sources - return a.GetSourcePtrByIndex(sourcePosition - 1) + return spec.GetSourcePtrByIndex(sourcePosition - 1) } -func (a *ApplicationSpec) GetSourcePtrByIndex(sourceIndex int) *ApplicationSource { +func (spec *ApplicationSpec) GetSourcePtrByIndex(sourceIndex int) *ApplicationSource { + if spec.SourceHydrator != nil { + source := spec.SourceHydrator.GetSyncSource() + return &source + } // if Application has multiple sources, return the first source in sources - if a.HasMultipleSources() { + if spec.HasMultipleSources() { if sourceIndex > 0 { - return &a.Sources[sourceIndex] + return &spec.Sources[sourceIndex] } - return &a.Sources[0] + return &spec.Sources[0] } - 
return a.Source + return spec.Source } // AllowsConcurrentProcessing returns true if given application source can be processed concurrently -func (a *ApplicationSource) AllowsConcurrentProcessing() bool { - switch { +func (source *ApplicationSource) AllowsConcurrentProcessing() bool { // Kustomize with parameters requires changing kustomization.yaml file - case a.Kustomize != nil: - return a.Kustomize.AllowsConcurrentProcessing() + if source.Kustomize != nil { + return source.Kustomize.AllowsConcurrentProcessing() } return true } // IsRef returns true when the application source is of type Ref -func (a *ApplicationSource) IsRef() bool { - return a.Ref != "" +func (source *ApplicationSource) IsRef() bool { + return source.Ref != "" } // IsHelm returns true when the application source is of type Helm -func (a *ApplicationSource) IsHelm() bool { - return a.Chart != "" +func (source *ApplicationSource) IsHelm() bool { + return source.Chart != "" } // IsHelmOci returns true when the application source is of type Helm OCI -func (a *ApplicationSource) IsHelmOci() bool { - if a.Chart == "" { +func (source *ApplicationSource) IsHelmOci() bool { + if source.Chart == "" { return false } - return helm.IsHelmOciRepo(a.RepoURL) + return helm.IsHelmOciRepo(source.RepoURL) } // IsZero returns true if the application source is considered empty -func (a *ApplicationSource) IsZero() bool { - return a == nil || - a.RepoURL == "" && - a.Path == "" && - a.TargetRevision == "" && - a.Helm.IsZero() && - a.Kustomize.IsZero() && - a.Directory.IsZero() && - a.Plugin.IsZero() +func (source *ApplicationSource) IsZero() bool { + return source == nil || + source.RepoURL == "" && + source.Path == "" && + source.TargetRevision == "" && + source.Helm.IsZero() && + source.Kustomize.IsZero() && + source.Directory.IsZero() && + source.Plugin.IsZero() } // GetNamespaceOrDefault gets the static namespace configured in the source. If none is configured, returns the given // default. 
-func (a *ApplicationSource) GetNamespaceOrDefault(defaultNamespace string) string { - if a == nil { +func (source *ApplicationSource) GetNamespaceOrDefault(defaultNamespace string) string { + if source == nil { return defaultNamespace } - if a.Helm != nil && a.Helm.Namespace != "" { - return a.Helm.Namespace + if source.Helm != nil && source.Helm.Namespace != "" { + return source.Helm.Namespace } - if a.Kustomize != nil && a.Kustomize.Namespace != "" { - return a.Kustomize.Namespace + if source.Kustomize != nil && source.Kustomize.Namespace != "" { + return source.Kustomize.Namespace } return defaultNamespace } // GetKubeVersionOrDefault gets the static Kubernetes API version configured in the source. If none is configured, // returns the given default. -func (a *ApplicationSource) GetKubeVersionOrDefault(defaultKubeVersion string) string { - if a == nil { +func (source *ApplicationSource) GetKubeVersionOrDefault(defaultKubeVersion string) string { + if source == nil { return defaultKubeVersion } - if a.Helm != nil && a.Helm.KubeVersion != "" { - return a.Helm.KubeVersion + if source.Helm != nil && source.Helm.KubeVersion != "" { + return source.Helm.KubeVersion } - if a.Kustomize != nil && a.Kustomize.KubeVersion != "" { - return a.Kustomize.KubeVersion + if source.Kustomize != nil && source.Kustomize.KubeVersion != "" { + return source.Kustomize.KubeVersion } return defaultKubeVersion } // GetAPIVersionsOrDefault gets the static API versions list configured in the source. If none is configured, returns // the given default. 
-func (a *ApplicationSource) GetAPIVersionsOrDefault(defaultAPIVersions []string) []string { - if a == nil { +func (source *ApplicationSource) GetAPIVersionsOrDefault(defaultAPIVersions []string) []string { + if source == nil { return defaultAPIVersions } - if a.Helm != nil && len(a.Helm.APIVersions) > 0 { - return a.Helm.APIVersions + if source.Helm != nil && len(source.Helm.APIVersions) > 0 { + return source.Helm.APIVersions } - if a.Kustomize != nil && len(a.Kustomize.APIVersions) > 0 { - return a.Kustomize.APIVersions + if source.Kustomize != nil && len(source.Kustomize.APIVersions) > 0 { + return source.Kustomize.APIVersions } return defaultAPIVersions } @@ -347,6 +381,82 @@ const ( ApplicationSourceTypePlugin ApplicationSourceType = "Plugin" ) +// SourceHydrator specifies a dry "don't repeat yourself" source for manifests, a sync source from which to sync +// hydrated manifests, and an optional hydrateTo location to act as a "staging" aread for hydrated manifests. +type SourceHydrator struct { + // DrySource specifies where the dry "don't repeat yourself" manifest source lives. + DrySource DrySource `json:"drySource" protobuf:"bytes,1,name=drySource"` + // SyncSource specifies where to sync hydrated manifests from. + SyncSource SyncSource `json:"syncSource" protobuf:"bytes,2,name=syncSource"` + // HydrateTo specifies an optional "staging" location to push hydrated manifests to. An external system would then + // have to move manifests to the SyncSource, e.g. by pull request. + HydrateTo *HydrateTo `json:"hydrateTo,omitempty" protobuf:"bytes,3,opt,name=hydrateTo"` +} + +// GetSyncSource gets the source from which we should sync when a source hydrator is configured. +func (s SourceHydrator) GetSyncSource() ApplicationSource { + return ApplicationSource{ + // Pull the RepoURL from the dry source. The SyncSource's RepoURL is assumed to be the same. 
+ RepoURL: s.DrySource.RepoURL, + Path: s.SyncSource.Path, + TargetRevision: s.SyncSource.TargetBranch, + } +} + +// GetDrySource gets the dry source when a source hydrator is configured. +func (s SourceHydrator) GetDrySource() ApplicationSource { + return ApplicationSource{ + RepoURL: s.DrySource.RepoURL, + Path: s.DrySource.Path, + TargetRevision: s.DrySource.TargetRevision, + } +} + +// DeepEquals returns true if the SourceHydrator is deeply equal to the given SourceHydrator. +func (s SourceHydrator) DeepEquals(hydrator SourceHydrator) bool { + return s.DrySource == hydrator.DrySource && s.SyncSource == hydrator.SyncSource && s.HydrateTo.DeepEquals(hydrator.HydrateTo) +} + +// DrySource specifies a location for dry "don't repeat yourself" manifest source information. +type DrySource struct { + // RepoURL is the URL to the git repository that contains the application manifests + RepoURL string `json:"repoURL" protobuf:"bytes,1,name=repoURL"` + // TargetRevision defines the revision of the source to hydrate + TargetRevision string `json:"targetRevision" protobuf:"bytes,2,name=targetRevision"` + // Path is a directory path within the Git repository where the manifests are located + Path string `json:"path" protobuf:"bytes,3,name=path"` +} + +// SyncSource specifies a location from which hydrated manifests may be synced. RepoURL is assumed based on the +// associated DrySource config in the SourceHydrator. +type SyncSource struct { + // TargetBranch is the branch to which hydrated manifests should be committed + TargetBranch string `json:"targetBranch" protobuf:"bytes,1,name=targetBranch"` + // Path is a directory path within the git repository where hydrated manifests should be committed to and synced + // from. If hydrateTo is set, this is just the path from which hydrated manifests will be synced. 
+ Path string `json:"path" protobuf:"bytes,2,name=path"` +} + +// HydrateTo specifies a location to which hydrated manifests should be pushed as a "staging area" before being moved to +// the SyncSource. The RepoURL and Path are assumed based on the associated SyncSource config in the SourceHydrator. +type HydrateTo struct { + // TargetBranch is the branch to which hydrated manifests should be committed + TargetBranch string `json:"targetBranch" protobuf:"bytes,1,name=targetBranch"` +} + +// DeepEquals returns true if the HydrateTo is deeply equal to the given HydrateTo. +func (in *HydrateTo) DeepEquals(to *HydrateTo) bool { + if in == nil { + return to == nil + } + if to == nil { + // We already know in is not nil. + return false + } + // Compare de-referenced structs. + return *in == *to +} + // RefreshType specifies how to refresh the sources of a given application type RefreshType string @@ -355,6 +465,13 @@ const ( RefreshTypeHard RefreshType = "hard" ) +type HydrateType string + +const ( + // HydrateTypeNormal is a normal hydration + HydrateTypeNormal HydrateType = "normal" +) + type RefTarget struct { Repo Repository `protobuf:"bytes,1,opt,name=repo"` TargetRevision string `protobuf:"bytes,2,opt,name=targetRevision"` @@ -395,6 +512,10 @@ type ApplicationSourceHelm struct { // APIVersions specifies the Kubernetes resource API versions to pass to Helm when templating manifests. By default, // Argo CD uses the API versions of the target cluster. The format is [group/]version/kind. APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,13,opt,name=apiVersions"` + // SkipTests skips test manifest installation step (Helm's --skip-tests). 
+ SkipTests bool `json:"skipTests,omitempty" protobuf:"bytes,14,opt,name=skipTests"` + // SkipSchemaValidation skips JSON schema validation (Helm's --skip-schema-validation) + SkipSchemaValidation bool `json:"skipSchemaValidation,omitempty" protobuf:"bytes,15,opt,name=skipSchemaValidation"` } // HelmParameter is a parameter that's passed to helm template during manifest generation @@ -444,39 +565,39 @@ func NewHelmFileParameter(text string) (*HelmFileParameter, error) { // AddParameter adds a HelmParameter to the application source. If a parameter with the same name already // exists, its value will be overwritten. Otherwise, the HelmParameter will be appended as a new entry. -func (in *ApplicationSourceHelm) AddParameter(p HelmParameter) { +func (ash *ApplicationSourceHelm) AddParameter(p HelmParameter) { found := false - for i, cp := range in.Parameters { + for i, cp := range ash.Parameters { if cp.Name == p.Name { found = true - in.Parameters[i] = p + ash.Parameters[i] = p break } } if !found { - in.Parameters = append(in.Parameters, p) + ash.Parameters = append(ash.Parameters, p) } } // AddFileParameter adds a HelmFileParameter to the application source. If a file parameter with the same name already // exists, its value will be overwritten. Otherwise, the HelmFileParameter will be appended as a new entry. 
-func (in *ApplicationSourceHelm) AddFileParameter(p HelmFileParameter) { +func (ash *ApplicationSourceHelm) AddFileParameter(p HelmFileParameter) { found := false - for i, cp := range in.FileParameters { + for i, cp := range ash.FileParameters { if cp.Name == p.Name { found = true - in.FileParameters[i] = p + ash.FileParameters[i] = p break } } if !found { - in.FileParameters = append(in.FileParameters, p) + ash.FileParameters = append(ash.FileParameters, p) } } // IsZero Returns true if the Helm options in an application source are considered zero -func (h *ApplicationSourceHelm) IsZero() bool { - return h == nil || (h.Version == "") && (h.ReleaseName == "") && len(h.ValueFiles) == 0 && len(h.Parameters) == 0 && len(h.FileParameters) == 0 && h.ValuesIsEmpty() && !h.PassCredentials && !h.IgnoreMissingValueFiles && !h.SkipCrds && h.KubeVersion == "" && len(h.APIVersions) == 0 && h.Namespace == "" +func (ash *ApplicationSourceHelm) IsZero() bool { + return ash == nil || (ash.Version == "") && (ash.ReleaseName == "") && len(ash.ValueFiles) == 0 && len(ash.Parameters) == 0 && len(ash.FileParameters) == 0 && ash.ValuesIsEmpty() && !ash.PassCredentials && !ash.IgnoreMissingValueFiles && !ash.SkipCrds && !ash.SkipTests && !ash.SkipSchemaValidation && ash.KubeVersion == "" && len(ash.APIVersions) == 0 && ash.Namespace == "" } // KustomizeImage represents a Kustomize image definition in the format [old_image_name=]: @@ -564,14 +685,13 @@ type KustomizeReplicas []KustomizeReplica // If parsing error occurs, returns 0 and error. func (kr KustomizeReplica) GetIntCount() (int, error) { if kr.Count.Type == intstr.String { - if count, err := strconv.Atoi(kr.Count.StrVal); err != nil { + count, err := strconv.Atoi(kr.Count.StrVal) + if err != nil { return 0, fmt.Errorf("expected integer value for count. 
Received: %s", kr.Count.StrVal) - } else { - return count, nil } - } else { - return kr.Count.IntValue(), nil + return count, nil } + return kr.Count.IntValue(), nil } // NewKustomizeReplica parses a string in format name=count into a KustomizeReplica object and returns it @@ -701,9 +821,8 @@ func NewJsonnetVar(s string, code bool) JsonnetVar { parts := strings.SplitN(s, "=", 2) if len(parts) == 2 { return JsonnetVar{Name: parts[0], Value: parts[1], Code: code} - } else { - return JsonnetVar{Name: s, Code: code} } + return JsonnetVar{Name: s, Code: code} } // ApplicationSourceJsonnet holds options specific to applications of type Jsonnet @@ -815,7 +934,7 @@ type ApplicationSourcePluginParameter struct { // Name is the name identifying a parameter. Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` // String_ is the value of a string type parameter. - String_ *string `json:"string,omitempty" protobuf:"bytes,5,opt,name=string"` + String_ *string `json:"string,omitempty" protobuf:"bytes,5,opt,name=string"` //nolint:revive //FIXME(var-naming) // Map is the value of a map type parameter. *OptionalMap `json:",omitempty" protobuf:"bytes,3,rep,name=map"` // Array is the value of an array type parameter. @@ -839,7 +958,7 @@ func (p ApplicationSourcePluginParameter) Equals(other ApplicationSourcePluginPa // // There are efforts to change things upstream, but nothing has been merged yet. 
See https://github.com/golang/go/issues/37711 func (p ApplicationSourcePluginParameter) MarshalJSON() ([]byte, error) { - out := map[string]interface{}{} + out := map[string]any{} out["name"] = p.Name if p.String_ != nil { out["string"] = p.String_ @@ -896,12 +1015,12 @@ func (p ApplicationSourcePluginParameters) Environ() ([]string, error) { if err != nil { return nil, fmt.Errorf("failed to marshal plugin parameters: %w", err) } - jsonParam := fmt.Sprintf("ARGOCD_APP_PARAMETERS=%s", string(out)) + jsonParam := "ARGOCD_APP_PARAMETERS=" + string(out) env := []string{jsonParam} for _, param := range p { - envBaseName := fmt.Sprintf("PARAM_%s", escaped(param.Name)) + envBaseName := "PARAM_" + escaped(param.Name) if param.String_ != nil { env = append(env, fmt.Sprintf("%s=%s", envBaseName, *param.String_)) } @@ -1043,18 +1162,76 @@ type ApplicationStatus struct { SourceTypes []ApplicationSourceType `json:"sourceTypes,omitempty" protobuf:"bytes,12,opt,name=sourceTypes"` // ControllerNamespace indicates the namespace in which the application controller is located ControllerNamespace string `json:"controllerNamespace,omitempty" protobuf:"bytes,13,opt,name=controllerNamespace"` + // SourceHydrator stores information about the current state of source hydration + SourceHydrator SourceHydratorStatus `json:"sourceHydrator,omitempty" protobuf:"bytes,14,opt,name=sourceHydrator"` +} + +// SourceHydratorStatus contains information about the current state of source hydration +type SourceHydratorStatus struct { + // LastSuccessfulOperation holds info about the most recent successful hydration + LastSuccessfulOperation *SuccessfulHydrateOperation `json:"lastSuccessfulOperation,omitempty" protobuf:"bytes,1,opt,name=lastSuccessfulOperation"` + // CurrentOperation holds the status of the hydrate operation + CurrentOperation *HydrateOperation `json:"currentOperation,omitempty" protobuf:"bytes,2,opt,name=currentOperation"` } +func (status *ApplicationStatus) FindResource(key 
kube.ResourceKey) (*ResourceStatus, bool) { + for i := range status.Resources { + res := status.Resources[i] + if kube.NewResourceKey(res.Group, res.Kind, res.Namespace, res.Name) == key { + return &res, true + } + } + return nil, false +} + +// HydrateOperation contains information about the most recent hydrate operation +type HydrateOperation struct { + // StartedAt indicates when the hydrate operation started + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` + // FinishedAt indicates when the hydrate operation finished + FinishedAt *metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,2,opt,name=finishedAt"` + // Phase indicates the status of the hydrate operation + Phase HydrateOperationPhase `json:"phase" protobuf:"bytes,3,opt,name=phase"` + // Message contains a message describing the current status of the hydrate operation + Message string `json:"message" protobuf:"bytes,4,opt,name=message"` + // DrySHA holds the resolved revision (sha) of the dry source as of the most recent reconciliation + DrySHA string `json:"drySHA,omitempty" protobuf:"bytes,5,opt,name=drySHA"` + // HydratedSHA holds the resolved revision (sha) of the hydrated source as of the most recent reconciliation + HydratedSHA string `json:"hydratedSHA,omitempty" protobuf:"bytes,6,opt,name=hydratedSHA"` + // SourceHydrator holds the hydrator config used for the hydrate operation + SourceHydrator SourceHydrator `json:"sourceHydrator,omitempty" protobuf:"bytes,7,opt,name=sourceHydrator"` +} + +// SuccessfulHydrateOperation contains information about the most recent successful hydrate operation +type SuccessfulHydrateOperation struct { + // DrySHA holds the resolved revision (sha) of the dry source as of the most recent reconciliation + DrySHA string `json:"drySHA,omitempty" protobuf:"bytes,5,opt,name=drySHA"` + // HydratedSHA holds the resolved revision (sha) of the hydrated source as of the most recent reconciliation + HydratedSHA string 
`json:"hydratedSHA,omitempty" protobuf:"bytes,6,opt,name=hydratedSHA"` + // SourceHydrator holds the hydrator config used for the hydrate operation + SourceHydrator SourceHydrator `json:"sourceHydrator,omitempty" protobuf:"bytes,7,opt,name=sourceHydrator"` +} + +// HydrateOperationPhase indicates the status of a hydrate operation +// +kubebuilder:validation:Enum=Hydrating;Failed;Hydrated +type HydrateOperationPhase string + +const ( + HydrateOperationPhaseHydrating HydrateOperationPhase = "Hydrating" + HydrateOperationPhaseFailed HydrateOperationPhase = "Failed" + HydrateOperationPhaseHydrated HydrateOperationPhase = "Hydrated" +) + // GetRevisions will return the current revision associated with the Application. // If app has multisources, it will return all corresponding revisions preserving // order from the app.spec.sources. If app has only one source, it will return a // single revision in the list. -func (a *ApplicationStatus) GetRevisions() []string { +func (status *ApplicationStatus) GetRevisions() []string { revisions := []string{} - if len(a.Sync.Revisions) > 0 { - revisions = a.Sync.Revisions - } else if a.Sync.Revision != "" { - revisions = append(revisions, a.Sync.Revision) + if len(status.Sync.Revisions) > 0 { + revisions = status.Sync.Revisions + } else if status.Sync.Revision != "" { + revisions = append(revisions, status.Sync.Revision) } return revisions } @@ -1350,15 +1527,15 @@ type SyncStrategy struct { // Force returns true if the sync strategy specifies to perform a forced sync func (m *SyncStrategy) Force() bool { - if m == nil { + switch { + case m == nil: return false - } else if m.Apply != nil { + case m.Apply != nil: return m.Apply.Force - } else if m.Hook != nil { + case m.Hook != nil: return m.Hook.Force - } else { - return false } + return false } // SyncStrategyApply uses `kubectl apply` to perform the apply @@ -1603,6 +1780,8 @@ type HealthStatus struct { Status health.HealthStatusCode `json:"status,omitempty" 
protobuf:"bytes,1,opt,name=status"` // Message is a human-readable informational message describing the health status Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // LastTransitionTime is the time the HealthStatus was set or updated + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` } // InfoItem contains arbitrary, human readable information about an application @@ -1616,29 +1795,29 @@ type InfoItem struct { // ResourceNetworkingInfo holds networking resource related information // TODO: describe members of this type type ResourceNetworkingInfo struct { - TargetLabels map[string]string `json:"targetLabels,omitempty" protobuf:"bytes,1,opt,name=targetLabels"` - TargetRefs []ResourceRef `json:"targetRefs,omitempty" protobuf:"bytes,2,opt,name=targetRefs"` - Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,3,opt,name=labels"` - Ingress []v1.LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,4,opt,name=ingress"` + TargetLabels map[string]string `json:"targetLabels,omitempty" protobuf:"bytes,1,opt,name=targetLabels"` + TargetRefs []ResourceRef `json:"targetRefs,omitempty" protobuf:"bytes,2,opt,name=targetRefs"` + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,3,opt,name=labels"` + Ingress []corev1.LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,4,opt,name=ingress"` // ExternalURLs holds list of URLs which should be available externally. List is populated for ingress resources using rules hostnames. 
ExternalURLs []string `json:"externalURLs,omitempty" protobuf:"bytes,5,opt,name=externalURLs"` } // TODO: describe this type type HostResourceInfo struct { - ResourceName v1.ResourceName `json:"resourceName,omitempty" protobuf:"bytes,1,name=resourceName"` - RequestedByApp int64 `json:"requestedByApp,omitempty" protobuf:"bytes,2,name=requestedByApp"` - RequestedByNeighbors int64 `json:"requestedByNeighbors,omitempty" protobuf:"bytes,3,name=requestedByNeighbors"` - Capacity int64 `json:"capacity,omitempty" protobuf:"bytes,4,name=capacity"` + ResourceName corev1.ResourceName `json:"resourceName,omitempty" protobuf:"bytes,1,name=resourceName"` + RequestedByApp int64 `json:"requestedByApp,omitempty" protobuf:"bytes,2,name=requestedByApp"` + RequestedByNeighbors int64 `json:"requestedByNeighbors,omitempty" protobuf:"bytes,3,name=requestedByNeighbors"` + Capacity int64 `json:"capacity,omitempty" protobuf:"bytes,4,name=capacity"` } // HostInfo holds host name and resources metrics // TODO: describe purpose of this type // TODO: describe members of this type type HostInfo struct { - Name string `json:"name,omitempty" protobuf:"bytes,1,name=name"` - ResourcesInfo []HostResourceInfo `json:"resourcesInfo,omitempty" protobuf:"bytes,2,name=resourcesInfo"` - SystemInfo v1.NodeSystemInfo `json:"systemInfo,omitempty" protobuf:"bytes,3,opt,name=systemInfo"` + Name string `json:"name,omitempty" protobuf:"bytes,1,name=name"` + ResourcesInfo []HostResourceInfo `json:"resourcesInfo,omitempty" protobuf:"bytes,2,name=resourcesInfo"` + SystemInfo corev1.NodeSystemInfo `json:"systemInfo,omitempty" protobuf:"bytes,3,opt,name=systemInfo"` } // ApplicationTree holds nodes which belongs to the application @@ -1758,7 +1937,8 @@ func (t *ApplicationTree) GetSummary(app *Application) ApplicationSummary { urlsSet[v] = true } } - urls := make([]string, 0) + + urls := make([]string, 0, len(urlsSet)) for url := range urlsSet { urls = append(urls, url) } @@ -1816,16 +1996,17 @@ func (n *ResourceNode) 
GroupKindVersion() schema.GroupVersionKind { // ResourceStatus holds the current sync and health status of a resource // TODO: describe members of this type type ResourceStatus struct { - Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` - Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` - Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` - Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` - Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` - Status SyncStatusCode `json:"status,omitempty" protobuf:"bytes,6,opt,name=status"` - Health *HealthStatus `json:"health,omitempty" protobuf:"bytes,7,opt,name=health"` - Hook bool `json:"hook,omitempty" protobuf:"bytes,8,opt,name=hook"` - RequiresPruning bool `json:"requiresPruning,omitempty" protobuf:"bytes,9,opt,name=requiresPruning"` - SyncWave int64 `json:"syncWave,omitempty" protobuf:"bytes,10,opt,name=syncWave"` + Group string `json:"group,omitempty" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version,omitempty" protobuf:"bytes,2,opt,name=version"` + Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` + Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"` + Status SyncStatusCode `json:"status,omitempty" protobuf:"bytes,6,opt,name=status"` + Health *HealthStatus `json:"health,omitempty" protobuf:"bytes,7,opt,name=health"` + Hook bool `json:"hook,omitempty" protobuf:"bytes,8,opt,name=hook"` + RequiresPruning bool `json:"requiresPruning,omitempty" protobuf:"bytes,9,opt,name=requiresPruning"` + SyncWave int64 `json:"syncWave,omitempty" protobuf:"bytes,10,opt,name=syncWave"` + RequiresDeletionConfirmation bool `json:"requiresDeletionConfirmation,omitempty" protobuf:"bytes,11,opt,name=requiresDeletionConfirmation"` } // GroupVersionKind returns the GVK schema type for given 
resource status @@ -1945,11 +2126,11 @@ func (c *Cluster) Equals(other *Cluster) bool { return false } - if !collections.StringMapsEqual(c.Annotations, other.Annotations) { + if !maps.Equal(c.Annotations, other.Annotations) { return false } - if !collections.StringMapsEqual(c.Labels, other.Labels) { + if !maps.Equal(c.Labels, other.Labels) { return false } @@ -1974,7 +2155,7 @@ func (c *ClusterInfo) GetKubeVersion() string { return c.ServerVersion } -func (c *ClusterInfo) GetApiVersions() []string { +func (c *ClusterInfo) GetApiVersions() []string { //nolint:revive //FIXME(var-naming) return c.APIVersions } @@ -2045,6 +2226,12 @@ type ClusterConfig struct { // ExecProviderConfig contains configuration for an exec provider ExecProviderConfig *ExecProviderConfig `json:"execProviderConfig,omitempty" protobuf:"bytes,6,opt,name=execProviderConfig"` + + // DisableCompression bypasses automatic GZip compression requests to the server. + DisableCompression bool `json:"disableCompression,omitempty" protobuf:"bytes,7,opt,name=disableCompression"` + + // ProxyURL is the URL to the proxy to be used for all requests send to the server + ProxyUrl string `json:"proxyUrl,omitempty" protobuf:"bytes,8,opt,name=proxyUrl"` //nolint:revive //FIXME(var-naming) } // TLSClientConfig contains settings to enable transport layer security @@ -2106,21 +2293,23 @@ type ResourceOverride struct { KnownTypeFields []KnownTypeField `protobuf:"bytes,4,opt,name=knownTypeFields"` } -// TODO: describe this method -func (s *ResourceOverride) UnmarshalJSON(data []byte) error { +// UnmarshalJSON unmarshals a JSON byte slice into a ResourceOverride object. +// It parses the raw input data and handles special processing for `IgnoreDifferences` +// and `IgnoreResourceUpdates` fields using YAML format. 
+func (ro *ResourceOverride) UnmarshalJSON(data []byte) error { raw := &rawResourceOverride{} if err := json.Unmarshal(data, &raw); err != nil { return err } - s.KnownTypeFields = raw.KnownTypeFields - s.HealthLua = raw.HealthLua - s.UseOpenLibs = raw.UseOpenLibs - s.Actions = raw.Actions - err := yaml.Unmarshal([]byte(raw.IgnoreDifferences), &s.IgnoreDifferences) + ro.KnownTypeFields = raw.KnownTypeFields + ro.HealthLua = raw.HealthLua + ro.UseOpenLibs = raw.UseOpenLibs + ro.Actions = raw.Actions + err := yaml.Unmarshal([]byte(raw.IgnoreDifferences), &ro.IgnoreDifferences) if err != nil { return err } - err = yaml.Unmarshal([]byte(raw.IgnoreResourceUpdates), &s.IgnoreResourceUpdates) + err = yaml.Unmarshal([]byte(raw.IgnoreResourceUpdates), &ro.IgnoreResourceUpdates) if err != nil { return err } @@ -2185,7 +2374,7 @@ type ResourceActionParam struct { Default string `json:"default,omitempty" protobuf:"bytes,4,opt,name=default"` } -// TODO: refactor to use rbacpolicy.ActionGet, rbacpolicy.ActionCreate, without import cycle +// TODO: refactor to use rbac.ActionGet, rbac.ActionCreate, without import cycle var validActions = map[string]bool{ "get": true, "create": true, @@ -2214,19 +2403,6 @@ func isValidAction(action string) bool { return false } -// TODO: same as validActions, refacotor to use rbacpolicy.ResourceApplications etc. 
-var validResources = map[string]bool{ - "applications": true, - "repositories": true, - "clusters": true, - "exec": true, - "logs": true, -} - -func isValidResource(resource string) bool { - return validResources[resource] -} - func isValidObject(proj string, object string) bool { // match against [/]/ objectRegexp, err := regexp.Compile(fmt.Sprintf(`^%s(/[*\w-.]+)?/[*\w-.]+$`, regexp.QuoteMeta(proj))) @@ -2246,8 +2422,8 @@ func validatePolicy(proj string, role string, policy string) error { } // resource resource := strings.Trim(policyComponents[2], " ") - if !isValidResource(resource) { - return status.Errorf(codes.InvalidArgument, "invalid policy rule '%s': project resource must be: 'applications', 'repositories' or 'clusters', not '%s'", policy, resource) + if !rbac.ProjectScoped[resource] { + return status.Errorf(codes.InvalidArgument, "invalid policy rule '%s': project resource must be: 'applications', 'applicationsets', 'repositories', 'exec', 'logs' or 'clusters', not '%s'", policy, resource) } // action action := strings.Trim(policyComponents[3], " ") @@ -2384,24 +2560,24 @@ type SyncWindow struct { } // HasWindows returns true if SyncWindows has one or more SyncWindow -func (s *SyncWindows) HasWindows() bool { - return s != nil && len(*s) > 0 +func (w *SyncWindows) HasWindows() bool { + return w != nil && len(*w) > 0 } // Active returns a list of sync windows that are currently active -func (s *SyncWindows) Active() (*SyncWindows, error) { - return s.active(time.Now()) +func (w *SyncWindows) Active() (*SyncWindows, error) { + return w.active(time.Now()) } -func (s *SyncWindows) active(currentTime time.Time) (*SyncWindows, error) { +func (w *SyncWindows) active(currentTime time.Time) (*SyncWindows, error) { // If SyncWindows.Active() is called outside of a UTC locale, it should be // first converted to UTC before we scan through the SyncWindows. 
currentTime = currentTime.In(time.UTC) - if s.HasWindows() { + if w.HasWindows() { var active SyncWindows specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) - for _, w := range *s { + for _, w := range *w { schedule, sErr := specParser.Parse(w.Schedule) if sErr != nil { return nil, fmt.Errorf("cannot parse schedule '%s': %w", w.Schedule, sErr) @@ -2428,19 +2604,19 @@ func (s *SyncWindows) active(currentTime time.Time) (*SyncWindows, error) { // InactiveAllows will iterate over the SyncWindows and return all inactive allow windows // for the current time. If the current time is in an inactive allow window, syncs will // be denied. -func (s *SyncWindows) InactiveAllows() (*SyncWindows, error) { - return s.inactiveAllows(time.Now()) +func (w *SyncWindows) InactiveAllows() (*SyncWindows, error) { + return w.inactiveAllows(time.Now()) } -func (s *SyncWindows) inactiveAllows(currentTime time.Time) (*SyncWindows, error) { +func (w *SyncWindows) inactiveAllows(currentTime time.Time) (*SyncWindows, error) { // If SyncWindows.InactiveAllows() is called outside of a UTC locale, it should be // first converted to UTC before we scan through the SyncWindows. 
currentTime = currentTime.In(time.UTC) - if s.HasWindows() { + if w.HasWindows() { var inactive SyncWindows specParser := cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) - for _, w := range *s { + for _, w := range *w { if w.Kind == "allow" { schedule, sErr := specParser.Parse(w.Schedule) if sErr != nil { @@ -2477,9 +2653,9 @@ func (w *SyncWindow) scheduleOffsetByTimeZone() time.Duration { } // AddWindow adds a sync window with the given parameters to the AppProject -func (s *AppProjectSpec) AddWindow(knd string, sch string, dur string, app []string, ns []string, cl []string, ms bool, timeZone string) error { +func (spec *AppProjectSpec) AddWindow(knd string, sch string, dur string, app []string, ns []string, cl []string, ms bool, timeZone string) error { if len(knd) == 0 || len(sch) == 0 || len(dur) == 0 { - return fmt.Errorf("cannot create window: require kind, schedule, duration and one or more of applications, namespaces and clusters") + return errors.New("cannot create window: require kind, schedule, duration and one or more of applications, namespaces and clusters") } window := &SyncWindow{ @@ -2505,18 +2681,18 @@ func (s *AppProjectSpec) AddWindow(knd string, sch string, dur string, app []str return err } - s.SyncWindows = append(s.SyncWindows, window) + spec.SyncWindows = append(spec.SyncWindows, window) return nil } // DeleteWindow deletes a sync window with the given id from the AppProject -func (s *AppProjectSpec) DeleteWindow(id int) error { +func (spec *AppProjectSpec) DeleteWindow(id int) error { var exists bool - for i := range s.SyncWindows { + for i := range spec.SyncWindows { if i == id { exists = true - s.SyncWindows = append(s.SyncWindows[:i], s.SyncWindows[i+1:]...) + spec.SyncWindows = append(spec.SyncWindows[:i], spec.SyncWindows[i+1:]...) 
break } } @@ -2581,9 +2757,8 @@ func (w *SyncWindows) CanSync(isManual bool) (bool, error) { if hasActiveDeny { if isManual && manualEnabled { return true, nil - } else { - return false, nil } + return false, nil } if active.hasAllow() { @@ -2597,9 +2772,8 @@ func (w *SyncWindows) CanSync(isManual bool) (bool, error) { if inactiveAllows.HasWindows() { if isManual && inactiveAllows.manualEnabled() { return true, nil - } else { - return false, nil } + return false, nil } return true, nil @@ -2686,7 +2860,7 @@ func (w SyncWindow) active(currentTime time.Time) (bool, error) { // Update updates a sync window's settings with the given parameter func (w *SyncWindow) Update(s string, d string, a []string, n []string, c []string, tz string) error { if len(s) == 0 && len(d) == 0 && len(a) == 0 && len(n) == 0 && len(c) == 0 { - return fmt.Errorf("cannot update: require one or more of schedule, duration, application, namespace, or cluster") + return errors.New("cannot update: require one or more of schedule, duration, application, namespace, or cluster") } if len(s) > 0 { @@ -2740,10 +2914,10 @@ func (w *SyncWindow) Validate() error { } // DestinationClusters returns a list of cluster URLs allowed as destination in an AppProject -func (d AppProjectSpec) DestinationClusters() []string { +func (spec AppProjectSpec) DestinationClusters() []string { servers := make([]string, 0) - for _, d := range d.Destinations { + for _, d := range spec.Destinations { servers = append(servers, d.Server) } @@ -2836,6 +3010,22 @@ func (app *Application) IsRefreshRequested() (RefreshType, bool) { return refreshType, true } +// IsHydrateRequested returns whether hydration has been requested for an application +func (app *Application) IsHydrateRequested() bool { + annotations := app.GetAnnotations() + if annotations == nil { + return false + } + typeStr, ok := annotations[AnnotationKeyHydrate] + if !ok { + return false + } + if typeStr == string(HydrateTypeNormal) { + return true + } + return false 
+} + func (app *Application) HasPostDeleteFinalizer(stage ...string) bool { return getFinalizerIndex(app.ObjectMeta, strings.Join(append([]string{PostDeleteFinalizerName}, stage...), "/")) > -1 } @@ -3119,6 +3309,9 @@ func SetK8SConfigDefaults(config *rest.Config) error { DisableCompression: config.DisableCompression, IdleConnTimeout: K8sTCPIdleConnTimeout, }) + if config.Proxy != nil { + transport.Proxy = config.Proxy + } tr, err := rest.HTTPWrappersForConfig(config, transport) if err != nil { return err @@ -3142,11 +3335,27 @@ func SetK8SConfigDefaults(config *rest.Config) error { return nil } +// ParseProxyUrl returns a parsed url and verifies that schema is correct +func ParseProxyUrl(proxyUrl string) (*url.URL, error) { //nolint:revive //FIXME(var-naming) + u, err := url.Parse(proxyUrl) + if err != nil { + return nil, err + } + switch u.Scheme { + case "http", "https", "socks5": + default: + return nil, fmt.Errorf("Failed to parse proxy url, unsupported scheme %q, must be http, https, or socks5", u.Scheme) + } + return u, nil +} + // RawRestConfig returns a go-client REST config from cluster that might be serialized into the file using kube.WriteKubeConfig method. 
-func (c *Cluster) RawRestConfig() *rest.Config { +func (c *Cluster) RawRestConfig() (*rest.Config, error) { var config *rest.Config var err error - if c.Server == KubernetesInternalAPIServerAddr && env.ParseBoolFromEnv(EnvVarFakeInClusterConfig, false) { + + switch { + case c.Server == KubernetesInternalAPIServerAddr && env.ParseBoolFromEnv(EnvVarFakeInClusterConfig, false): conf, exists := os.LookupEnv("KUBECONFIG") if exists { config, err = clientcmd.BuildConfigFromFlags("", conf) @@ -3158,9 +3367,9 @@ func (c *Cluster) RawRestConfig() *rest.Config { } config, err = clientcmd.BuildConfigFromFlags("", filepath.Join(homeDir, ".kube", "config")) } - } else if c.Server == KubernetesInternalAPIServerAddr && c.Config.Username == "" && c.Config.Password == "" && c.Config.BearerToken == "" { + case c.Server == KubernetesInternalAPIServerAddr && c.Config.Username == "" && c.Config.Password == "" && c.Config.BearerToken == "": config, err = rest.InClusterConfig() - } else if c.Server == KubernetesInternalAPIServerAddr { + case c.Server == KubernetesInternalAPIServerAddr: config, err = rest.InClusterConfig() if err == nil { config.Username = c.Config.Username @@ -3168,7 +3377,7 @@ func (c *Cluster) RawRestConfig() *rest.Config { config.BearerToken = c.Config.BearerToken config.BearerTokenFile = "" } - } else { + default: tlsClientConfig := rest.TLSClientConfig{ Insecure: c.Config.TLSClientConfig.Insecure, ServerName: c.Config.TLSClientConfig.ServerName, @@ -3176,7 +3385,8 @@ func (c *Cluster) RawRestConfig() *rest.Config { KeyData: c.Config.TLSClientConfig.KeyData, CAData: c.Config.TLSClientConfig.CAData, } - if c.Config.AWSAuthConfig != nil { + switch { + case c.Config.AWSAuthConfig != nil: args := []string{"aws", "--cluster-name", c.Config.AWSAuthConfig.ClusterName} if c.Config.AWSAuthConfig.RoleARN != "" { args = append(args, "--role-arn", c.Config.AWSAuthConfig.RoleARN) @@ -3194,7 +3404,7 @@ func (c *Cluster) RawRestConfig() *rest.Config { InteractiveMode: 
api.NeverExecInteractiveMode, }, } - } else if c.Config.ExecProviderConfig != nil { + case c.Config.ExecProviderConfig != nil: var env []api.ExecEnvVar if c.Config.ExecProviderConfig.Env != nil { for key, value := range c.Config.ExecProviderConfig.Env { @@ -3216,7 +3426,7 @@ func (c *Cluster) RawRestConfig() *rest.Config { InteractiveMode: api.NeverExecInteractiveMode, }, } - } else { + default: config = &rest.Config{ Host: c.Server, Username: c.Config.Username, @@ -3227,22 +3437,33 @@ func (c *Cluster) RawRestConfig() *rest.Config { } } if err != nil { - panic(fmt.Sprintf("Unable to create K8s REST config: %v", err)) + return nil, fmt.Errorf("Unable to create K8s REST config: %w", err) } + if c.Config.ProxyUrl != "" { + u, err := ParseProxyUrl(c.Config.ProxyUrl) + if err != nil { + return nil, fmt.Errorf("Unable to create K8s REST config, can`t parse proxy url: %w", err) + } + config.Proxy = http.ProxyURL(u) + } + config.DisableCompression = c.Config.DisableCompression config.Timeout = K8sServerSideTimeout config.QPS = K8sClientConfigQPS config.Burst = K8sClientConfigBurst - return config + return config, nil } // RESTConfig returns a go-client REST config from cluster with tuned throttling and HTTP client settings. -func (c *Cluster) RESTConfig() *rest.Config { - config := c.RawRestConfig() - err := SetK8SConfigDefaults(config) +func (c *Cluster) RESTConfig() (*rest.Config, error) { + config, err := c.RawRestConfig() + if err != nil { + return nil, fmt.Errorf("Unable to get K8s RAW REST config: %w", err) + } + err = SetK8SConfigDefaults(config) if err != nil { - panic(fmt.Sprintf("Unable to apply K8s REST config defaults: %v", err)) + return nil, fmt.Errorf("Unable to apply K8s REST config defaults: %w", err) } - return config + return config, nil } // UnmarshalToUnstructured unmarshals a resource representation in JSON to unstructured data @@ -3314,28 +3535,51 @@ func (d *ApplicationDestination) MarshalJSON() ([]byte, error) { // tracking values, i.e. 
in the format _. When the namespace // of the application is similar to the value of defaultNs, only the name of // the application is returned to keep backwards compatibility. -func (a *Application) InstanceName(defaultNs string) string { +func (app *Application) InstanceName(defaultNs string) string { // When app has no namespace set, or the namespace is the default ns, we // return just the application name - if a.Namespace == "" || a.Namespace == defaultNs { - return a.Name + if app.Namespace == "" || app.Namespace == defaultNs { + return app.Name } - return a.Namespace + "_" + a.Name + return app.Namespace + "_" + app.Name } // QualifiedName returns the full qualified name of the application, including // the name of the namespace it is created in delimited by a forward slash, // i.e. / -func (a *Application) QualifiedName() string { - if a.Namespace == "" { - return a.Name - } else { - return a.Namespace + "/" + a.Name +func (app *Application) QualifiedName() string { + if app.Namespace == "" { + return app.Name } + return app.Namespace + "/" + app.Name } // RBACName returns the full qualified RBAC resource name for the application // in a backwards-compatible way. -func (a *Application) RBACName(defaultNS string) string { - return security.RBACName(defaultNS, a.Spec.GetProject(), a.Namespace, a.Name) +func (app *Application) RBACName(defaultNS string) string { + return security.RBACName(defaultNS, app.Spec.GetProject(), app.Namespace, app.Name) +} + +// GetAnnotation returns the value of the specified annotation if it exists, +// e.g., a.GetAnnotation("argocd.argoproj.io/manifest-generate-paths"). +// If the annotation does not exist, it returns an empty string. 
+func (app *Application) GetAnnotation(annotation string) string { + v, exists := app.Annotations[annotation] + if !exists { + return "" + } + + return v +} + +func (a *Application) IsDeletionConfirmed(since time.Time) bool { + val := a.GetAnnotation(synccommon.AnnotationDeletionApproved) + if val == "" { + return false + } + parsedVal, err := time.Parse(time.RFC3339, val) + if err != nil { + return false + } + return parsedVal.After(since) || parsedVal.Equal(since) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go index 7e1d69473..9faf5d3a7 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1/zz_generated.deepcopy.go @@ -1,6 +1,19 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated +/* +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ // Code generated by deepcopy-gen. DO NOT EDIT. 
package v1alpha1 @@ -1086,6 +1099,11 @@ func (in *ApplicationSourceHelm) DeepCopyInto(out *ApplicationSourceHelm) { *out = new(runtime.RawExtension) (*in).DeepCopyInto(*out) } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -1169,6 +1187,11 @@ func (in *ApplicationSourceKustomize) DeepCopyInto(out *ApplicationSourceKustomi *out = make([]string, len(*in)) copy(*out, *in) } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -1329,6 +1352,11 @@ func (in *ApplicationSpec) DeepCopyInto(out *ApplicationSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.SourceHydrator != nil { + in, out := &in.SourceHydrator, &out.SourceHydrator + *out = new(SourceHydrator) + (*in).DeepCopyInto(*out) + } return } @@ -1353,7 +1381,7 @@ func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) { } } in.Sync.DeepCopyInto(&out.Sync) - out.Health = in.Health + in.Health.DeepCopyInto(&out.Health) if in.History != nil { in, out := &in.History, &out.History *out = make(RevisionHistories, len(*in)) @@ -1387,6 +1415,7 @@ func (in *ApplicationStatus) DeepCopyInto(out *ApplicationStatus) { *out = make([]ApplicationSourceType, len(*in)) copy(*out, *in) } + in.SourceHydrator.DeepCopyInto(&out.SourceHydrator) return } @@ -1522,6 +1551,27 @@ func (in *BasicAuthBitbucketServer) DeepCopy() *BasicAuthBitbucketServer { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BearerTokenBitbucket) DeepCopyInto(out *BearerTokenBitbucket) { + *out = *in + if in.TokenRef != nil { + in, out := &in.TokenRef, &out.TokenRef + *out = new(SecretRef) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BearerTokenBitbucket. 
+func (in *BearerTokenBitbucket) DeepCopy() *BearerTokenBitbucket { + if in == nil { + return nil + } + out := new(BearerTokenBitbucket) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BearerTokenBitbucketCloud) DeepCopyInto(out *BearerTokenBitbucketCloud) { *out = *in @@ -1826,6 +1876,22 @@ func (in *ConfigManagementPlugin) DeepCopy() *ConfigManagementPlugin { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapKeyRef) DeepCopyInto(out *ConfigMapKeyRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeyRef. +func (in *ConfigMapKeyRef) DeepCopy() *ConfigMapKeyRef { + if in == nil { + return nil + } + out := new(ConfigMapKeyRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ConnectionState) DeepCopyInto(out *ConnectionState) { *out = *in @@ -1846,6 +1912,22 @@ func (in *ConnectionState) DeepCopy() *ConnectionState { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DrySource) DeepCopyInto(out *DrySource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DrySource. +func (in *DrySource) DeepCopy() *DrySource { + if in == nil { + return nil + } + out := new(DrySource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *DuckTypeGenerator) DeepCopyInto(out *DuckTypeGenerator) { *out = *in @@ -2074,6 +2156,10 @@ func (in *GnuPGPublicKeyList) DeepCopy() *GnuPGPublicKeyList { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HealthStatus) DeepCopyInto(out *HealthStatus) { *out = *in + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } return } @@ -2178,6 +2264,44 @@ func (in *HostResourceInfo) DeepCopy() *HostResourceInfo { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HydrateOperation) DeepCopyInto(out *HydrateOperation) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + if in.FinishedAt != nil { + in, out := &in.FinishedAt, &out.FinishedAt + *out = (*in).DeepCopy() + } + in.SourceHydrator.DeepCopyInto(&out.SourceHydrator) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HydrateOperation. +func (in *HydrateOperation) DeepCopy() *HydrateOperation { + if in == nil { + return nil + } + out := new(HydrateOperation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HydrateTo) DeepCopyInto(out *HydrateTo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HydrateTo. +func (in *HydrateTo) DeepCopy() *HydrateTo { + if in == nil { + return nil + } + out := new(HydrateTo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in IgnoreDifferences) DeepCopyInto(out *IgnoreDifferences) { { @@ -3068,6 +3192,16 @@ func (in *PullRequestGeneratorBitbucketServer) DeepCopyInto(out *PullRequestGene *out = new(BasicAuthBitbucketServer) (*in).DeepCopyInto(*out) } + if in.BearerToken != nil { + in, out := &in.BearerToken, &out.BearerToken + *out = new(BearerTokenBitbucket) + (*in).DeepCopyInto(*out) + } + if in.CARef != nil { + in, out := &in.CARef, &out.CARef + *out = new(ConfigMapKeyRef) + **out = **in + } return } @@ -3120,6 +3254,11 @@ func (in *PullRequestGeneratorGitLab) DeepCopyInto(out *PullRequestGeneratorGitL *out = make([]string, len(*in)) copy(*out, *in) } + if in.CARef != nil { + in, out := &in.CARef, &out.CARef + *out = new(ConfigMapKeyRef) + **out = **in + } return } @@ -3576,7 +3715,7 @@ func (in *ResourceNode) DeepCopyInto(out *ResourceNode) { if in.Health != nil { in, out := &in.Health, &out.Health *out = new(HealthStatus) - **out = **in + (*in).DeepCopyInto(*out) } if in.CreatedAt != nil { in, out := &in.CreatedAt, &out.CreatedAt @@ -3682,7 +3821,7 @@ func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { if in.Health != nil { in, out := &in.Health, &out.Health *out = new(HealthStatus) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -3945,6 +4084,16 @@ func (in *SCMProviderGeneratorBitbucketServer) DeepCopyInto(out *SCMProviderGene *out = new(BasicAuthBitbucketServer) (*in).DeepCopyInto(*out) } + if in.BearerToken != nil { + in, out := &in.BearerToken, &out.BearerToken + *out = new(BearerTokenBitbucket) + (*in).DeepCopyInto(*out) + } + if in.CARef != nil { + in, out := &in.CARef, &out.CARef + *out = new(ConfigMapKeyRef) + **out = **in + } return } @@ -4054,6 +4203,11 @@ func (in *SCMProviderGeneratorGitlab) DeepCopyInto(out *SCMProviderGeneratorGitl *out = new(bool) **out = **in } + if in.CARef != nil { + in, out := &in.CARef, &out.CARef + *out = new(ConfigMapKeyRef) + **out = **in + } return } @@ -4099,6 +4253,72 @@ func (in *SignatureKey) DeepCopy() 
*SignatureKey { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceHydrator) DeepCopyInto(out *SourceHydrator) { + *out = *in + out.DrySource = in.DrySource + out.SyncSource = in.SyncSource + if in.HydrateTo != nil { + in, out := &in.HydrateTo, &out.HydrateTo + *out = new(HydrateTo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceHydrator. +func (in *SourceHydrator) DeepCopy() *SourceHydrator { + if in == nil { + return nil + } + out := new(SourceHydrator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceHydratorStatus) DeepCopyInto(out *SourceHydratorStatus) { + *out = *in + if in.LastSuccessfulOperation != nil { + in, out := &in.LastSuccessfulOperation, &out.LastSuccessfulOperation + *out = new(SuccessfulHydrateOperation) + (*in).DeepCopyInto(*out) + } + if in.CurrentOperation != nil { + in, out := &in.CurrentOperation, &out.CurrentOperation + *out = new(HydrateOperation) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceHydratorStatus. +func (in *SourceHydratorStatus) DeepCopy() *SourceHydratorStatus { + if in == nil { + return nil + } + out := new(SourceHydratorStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SuccessfulHydrateOperation) DeepCopyInto(out *SuccessfulHydrateOperation) { + *out = *in + in.SourceHydrator.DeepCopyInto(&out.SourceHydrator) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SuccessfulHydrateOperation. 
+func (in *SuccessfulHydrateOperation) DeepCopy() *SuccessfulHydrateOperation { + if in == nil { + return nil + } + out := new(SuccessfulHydrateOperation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SyncOperation) DeepCopyInto(out *SyncOperation) { *out = *in @@ -4285,6 +4505,22 @@ func (in *SyncPolicyAutomated) DeepCopy() *SyncPolicyAutomated { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SyncSource) DeepCopyInto(out *SyncSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncSource. +func (in *SyncSource) DeepCopy() *SyncSource { + if in == nil { + return nil + } + out := new(SyncSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SyncStatus) DeepCopyInto(out *SyncStatus) { *out = *in diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/doc.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/doc.go deleted file mode 100644 index 0e0c2a890..000000000 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. 
-package versioned diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake/clientset_generated.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake/clientset_generated.go index 5db236c54..fe6096b4c 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -15,8 +15,12 @@ import ( // NewSimpleClientset returns a clientset that will respond with the provided objects. // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, -// without applying any validations and/or defaults. It shouldn't be considered a replacement +// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement // for a real clientset and is mostly useful in simple unit tests. +// +// DEPRECATED: NewClientset replaces this with support for field management, which significantly improves +// server side apply testing. NewClientset is only available when apply configurations are generated (e.g. +// via --with-applyconfig). 
func NewSimpleClientset(objects ...runtime.Object) *Clientset { o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) for _, obj := range objects { diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/application.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/application.go index b51e0cb94..69546f444 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/application.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/application.go @@ -4,14 +4,13 @@ package v1alpha1 import ( "context" - "time" v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" scheme "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ApplicationsGetter has a method to return a ApplicationInterface. @@ -35,128 +34,18 @@ type ApplicationInterface interface { // applications implements ApplicationInterface type applications struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1alpha1.Application, *v1alpha1.ApplicationList] } // newApplications returns a Applications func newApplications(c *ArgoprojV1alpha1Client, namespace string) *applications { return &applications{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1alpha1.Application, *v1alpha1.ApplicationList]( + "applications", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.Application { return &v1alpha1.Application{} }, + func() *v1alpha1.ApplicationList { return &v1alpha1.ApplicationList{} }), } } - -// Get takes name of the application, and returns the corresponding application object, and an error if there is any. 
-func (c *applications) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Application, err error) { - result = &v1alpha1.Application{} - err = c.client.Get(). - Namespace(c.ns). - Resource("applications"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Applications that match those selectors. -func (c *applications) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ApplicationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ApplicationList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("applications"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested applications. -func (c *applications) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("applications"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a application and creates it. Returns the server's representation of the application, and an error, if there is any. -func (c *applications) Create(ctx context.Context, application *v1alpha1.Application, opts v1.CreateOptions) (result *v1alpha1.Application, err error) { - result = &v1alpha1.Application{} - err = c.client.Post(). - Namespace(c.ns). - Resource("applications"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(application). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a application and updates it. 
Returns the server's representation of the application, and an error, if there is any. -func (c *applications) Update(ctx context.Context, application *v1alpha1.Application, opts v1.UpdateOptions) (result *v1alpha1.Application, err error) { - result = &v1alpha1.Application{} - err = c.client.Put(). - Namespace(c.ns). - Resource("applications"). - Name(application.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(application). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the application and deletes it. Returns an error if one occurs. -func (c *applications) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("applications"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *applications) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("applications"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched application. -func (c *applications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Application, err error) { - result = &v1alpha1.Application{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("applications"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/applicationset.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/applicationset.go index 5c00011e7..9fadaabc0 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/applicationset.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/applicationset.go @@ -4,14 +4,13 @@ package v1alpha1 import ( "context" - "time" v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" scheme "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // ApplicationSetsGetter has a method to return a ApplicationSetInterface. @@ -35,128 +34,18 @@ type ApplicationSetInterface interface { // applicationSets implements ApplicationSetInterface type applicationSets struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1alpha1.ApplicationSet, *v1alpha1.ApplicationSetList] } // newApplicationSets returns a ApplicationSets func newApplicationSets(c *ArgoprojV1alpha1Client, namespace string) *applicationSets { return &applicationSets{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1alpha1.ApplicationSet, *v1alpha1.ApplicationSetList]( + "applicationsets", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.ApplicationSet { return &v1alpha1.ApplicationSet{} }, + func() *v1alpha1.ApplicationSetList { return &v1alpha1.ApplicationSetList{} }), } } - -// Get takes name of the applicationSet, and returns the corresponding applicationSet object, and an error if there is any. 
-func (c *applicationSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ApplicationSet, err error) { - result = &v1alpha1.ApplicationSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("applicationsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ApplicationSets that match those selectors. -func (c *applicationSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ApplicationSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.ApplicationSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("applicationsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested applicationSets. -func (c *applicationSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("applicationsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a applicationSet and creates it. Returns the server's representation of the applicationSet, and an error, if there is any. -func (c *applicationSets) Create(ctx context.Context, applicationSet *v1alpha1.ApplicationSet, opts v1.CreateOptions) (result *v1alpha1.ApplicationSet, err error) { - result = &v1alpha1.ApplicationSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("applicationsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(applicationSet). - Do(ctx). 
- Into(result) - return -} - -// Update takes the representation of a applicationSet and updates it. Returns the server's representation of the applicationSet, and an error, if there is any. -func (c *applicationSets) Update(ctx context.Context, applicationSet *v1alpha1.ApplicationSet, opts v1.UpdateOptions) (result *v1alpha1.ApplicationSet, err error) { - result = &v1alpha1.ApplicationSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("applicationsets"). - Name(applicationSet.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(applicationSet). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the applicationSet and deletes it. Returns an error if one occurs. -func (c *applicationSets) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("applicationsets"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *applicationSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("applicationsets"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched applicationSet. -func (c *applicationSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ApplicationSet, err error) { - result = &v1alpha1.ApplicationSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("applicationsets"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/appproject.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/appproject.go index 386f2db3f..a20ec8041 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/appproject.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/appproject.go @@ -4,14 +4,13 @@ package v1alpha1 import ( "context" - "time" v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" scheme "github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/scheme" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" + gentype "k8s.io/client-go/gentype" ) // AppProjectsGetter has a method to return a AppProjectInterface. @@ -35,128 +34,18 @@ type AppProjectInterface interface { // appProjects implements AppProjectInterface type appProjects struct { - client rest.Interface - ns string + *gentype.ClientWithList[*v1alpha1.AppProject, *v1alpha1.AppProjectList] } // newAppProjects returns a AppProjects func newAppProjects(c *ArgoprojV1alpha1Client, namespace string) *appProjects { return &appProjects{ - client: c.RESTClient(), - ns: namespace, + gentype.NewClientWithList[*v1alpha1.AppProject, *v1alpha1.AppProjectList]( + "appprojects", + c.RESTClient(), + scheme.ParameterCodec, + namespace, + func() *v1alpha1.AppProject { return &v1alpha1.AppProject{} }, + func() *v1alpha1.AppProjectList { return &v1alpha1.AppProjectList{} }), } } - -// Get takes name of the appProject, and returns the corresponding appProject object, and an error if there is any. 
-func (c *appProjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AppProject, err error) { - result = &v1alpha1.AppProject{} - err = c.client.Get(). - Namespace(c.ns). - Resource("appprojects"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(ctx). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of AppProjects that match those selectors. -func (c *appProjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AppProjectList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.AppProjectList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("appprojects"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(ctx). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested appProjects. -func (c *appProjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("appprojects"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch(ctx) -} - -// Create takes the representation of a appProject and creates it. Returns the server's representation of the appProject, and an error, if there is any. -func (c *appProjects) Create(ctx context.Context, appProject *v1alpha1.AppProject, opts v1.CreateOptions) (result *v1alpha1.AppProject, err error) { - result = &v1alpha1.AppProject{} - err = c.client.Post(). - Namespace(c.ns). - Resource("appprojects"). - VersionedParams(&opts, scheme.ParameterCodec). - Body(appProject). - Do(ctx). - Into(result) - return -} - -// Update takes the representation of a appProject and updates it. 
Returns the server's representation of the appProject, and an error, if there is any. -func (c *appProjects) Update(ctx context.Context, appProject *v1alpha1.AppProject, opts v1.UpdateOptions) (result *v1alpha1.AppProject, err error) { - result = &v1alpha1.AppProject{} - err = c.client.Put(). - Namespace(c.ns). - Resource("appprojects"). - Name(appProject.Name). - VersionedParams(&opts, scheme.ParameterCodec). - Body(appProject). - Do(ctx). - Into(result) - return -} - -// Delete takes name of the appProject and deletes it. Returns an error if one occurs. -func (c *appProjects) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("appprojects"). - Name(name). - Body(&opts). - Do(ctx). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *appProjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - var timeout time.Duration - if listOpts.TimeoutSeconds != nil { - timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("appprojects"). - VersionedParams(&listOpts, scheme.ParameterCodec). - Timeout(timeout). - Body(&opts). - Do(ctx). - Error() -} - -// Patch applies the patch and returns the patched appProject. -func (c *appProjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AppProject, err error) { - result = &v1alpha1.AppProject{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("appprojects"). - Name(name). - SubResource(subresources...). - VersionedParams(&opts, scheme.ParameterCodec). - Body(data). - Do(ctx). 
- Into(result) - return -} diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_application.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_application.go index f70ce5423..4a6cb8f9e 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_application.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_application.go @@ -8,7 +8,6 @@ import ( v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -20,28 +19,30 @@ type FakeApplications struct { ns string } -var applicationsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applications"} +var applicationsResource = v1alpha1.SchemeGroupVersion.WithResource("applications") -var applicationsKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "Application"} +var applicationsKind = v1alpha1.SchemeGroupVersion.WithKind("Application") // Get takes name of the application, and returns the corresponding application object, and an error if there is any. func (c *FakeApplications) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Application, err error) { + emptyResult := &v1alpha1.Application{} obj, err := c.Fake. 
- Invokes(testing.NewGetAction(applicationsResource, c.ns, name), &v1alpha1.Application{}) + Invokes(testing.NewGetActionWithOptions(applicationsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Application), err } // List takes label and field selectors, and returns the list of Applications that match those selectors. func (c *FakeApplications) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ApplicationList, err error) { + emptyResult := &v1alpha1.ApplicationList{} obj, err := c.Fake. - Invokes(testing.NewListAction(applicationsResource, applicationsKind, c.ns, opts), &v1alpha1.ApplicationList{}) + Invokes(testing.NewListActionWithOptions(applicationsResource, applicationsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -60,28 +61,30 @@ func (c *FakeApplications) List(ctx context.Context, opts v1.ListOptions) (resul // Watch returns a watch.Interface that watches the requested applications. func (c *FakeApplications) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(applicationsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(applicationsResource, c.ns, opts)) } // Create takes the representation of a application and creates it. Returns the server's representation of the application, and an error, if there is any. func (c *FakeApplications) Create(ctx context.Context, application *v1alpha1.Application, opts v1.CreateOptions) (result *v1alpha1.Application, err error) { + emptyResult := &v1alpha1.Application{} obj, err := c.Fake. 
- Invokes(testing.NewCreateAction(applicationsResource, c.ns, application), &v1alpha1.Application{}) + Invokes(testing.NewCreateActionWithOptions(applicationsResource, c.ns, application, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Application), err } // Update takes the representation of a application and updates it. Returns the server's representation of the application, and an error, if there is any. func (c *FakeApplications) Update(ctx context.Context, application *v1alpha1.Application, opts v1.UpdateOptions) (result *v1alpha1.Application, err error) { + emptyResult := &v1alpha1.Application{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(applicationsResource, c.ns, application), &v1alpha1.Application{}) + Invokes(testing.NewUpdateActionWithOptions(applicationsResource, c.ns, application, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Application), err } @@ -96,7 +99,7 @@ func (c *FakeApplications) Delete(ctx context.Context, name string, opts v1.Dele // DeleteCollection deletes a collection of objects. func (c *FakeApplications) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(applicationsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(applicationsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ApplicationList{}) return err @@ -104,11 +107,12 @@ func (c *FakeApplications) DeleteCollection(ctx context.Context, opts v1.DeleteO // Patch applies the patch and returns the patched application. func (c *FakeApplications) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Application, err error) { + emptyResult := &v1alpha1.Application{} obj, err := c.Fake. 
- Invokes(testing.NewPatchSubresourceAction(applicationsResource, c.ns, name, pt, data, subresources...), &v1alpha1.Application{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(applicationsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.Application), err } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_applicationset.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_applicationset.go index d1ae09977..04157e3d0 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_applicationset.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_applicationset.go @@ -8,7 +8,6 @@ import ( v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -20,28 +19,30 @@ type FakeApplicationSets struct { ns string } -var applicationsetsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "applicationsets"} +var applicationsetsResource = v1alpha1.SchemeGroupVersion.WithResource("applicationsets") -var applicationsetsKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "ApplicationSet"} +var applicationsetsKind = v1alpha1.SchemeGroupVersion.WithKind("ApplicationSet") // Get takes name of the applicationSet, and returns the corresponding applicationSet object, and an error if there is any. 
func (c *FakeApplicationSets) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ApplicationSet, err error) { + emptyResult := &v1alpha1.ApplicationSet{} obj, err := c.Fake. - Invokes(testing.NewGetAction(applicationsetsResource, c.ns, name), &v1alpha1.ApplicationSet{}) + Invokes(testing.NewGetActionWithOptions(applicationsetsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ApplicationSet), err } // List takes label and field selectors, and returns the list of ApplicationSets that match those selectors. func (c *FakeApplicationSets) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ApplicationSetList, err error) { + emptyResult := &v1alpha1.ApplicationSetList{} obj, err := c.Fake. - Invokes(testing.NewListAction(applicationsetsResource, applicationsetsKind, c.ns, opts), &v1alpha1.ApplicationSetList{}) + Invokes(testing.NewListActionWithOptions(applicationsetsResource, applicationsetsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -60,28 +61,30 @@ func (c *FakeApplicationSets) List(ctx context.Context, opts v1.ListOptions) (re // Watch returns a watch.Interface that watches the requested applicationSets. func (c *FakeApplicationSets) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(applicationsetsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(applicationsetsResource, c.ns, opts)) } // Create takes the representation of a applicationSet and creates it. Returns the server's representation of the applicationSet, and an error, if there is any. 
func (c *FakeApplicationSets) Create(ctx context.Context, applicationSet *v1alpha1.ApplicationSet, opts v1.CreateOptions) (result *v1alpha1.ApplicationSet, err error) { + emptyResult := &v1alpha1.ApplicationSet{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(applicationsetsResource, c.ns, applicationSet), &v1alpha1.ApplicationSet{}) + Invokes(testing.NewCreateActionWithOptions(applicationsetsResource, c.ns, applicationSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ApplicationSet), err } // Update takes the representation of a applicationSet and updates it. Returns the server's representation of the applicationSet, and an error, if there is any. func (c *FakeApplicationSets) Update(ctx context.Context, applicationSet *v1alpha1.ApplicationSet, opts v1.UpdateOptions) (result *v1alpha1.ApplicationSet, err error) { + emptyResult := &v1alpha1.ApplicationSet{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(applicationsetsResource, c.ns, applicationSet), &v1alpha1.ApplicationSet{}) + Invokes(testing.NewUpdateActionWithOptions(applicationsetsResource, c.ns, applicationSet, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ApplicationSet), err } @@ -96,7 +99,7 @@ func (c *FakeApplicationSets) Delete(ctx context.Context, name string, opts v1.D // DeleteCollection deletes a collection of objects. func (c *FakeApplicationSets) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(applicationsetsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(applicationsetsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.ApplicationSetList{}) return err @@ -104,11 +107,12 @@ func (c *FakeApplicationSets) DeleteCollection(ctx context.Context, opts v1.Dele // Patch applies the patch and returns the patched applicationSet. 
func (c *FakeApplicationSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ApplicationSet, err error) { + emptyResult := &v1alpha1.ApplicationSet{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(applicationsetsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ApplicationSet{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(applicationsetsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.ApplicationSet), err } diff --git a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_appproject.go b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_appproject.go index 802699f4c..d510e445a 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_appproject.go +++ b/vendor/github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake/fake_appproject.go @@ -8,7 +8,6 @@ import ( v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" testing "k8s.io/client-go/testing" @@ -20,28 +19,30 @@ type FakeAppProjects struct { ns string } -var appprojectsResource = schema.GroupVersionResource{Group: "argoproj.io", Version: "v1alpha1", Resource: "appprojects"} +var appprojectsResource = v1alpha1.SchemeGroupVersion.WithResource("appprojects") -var appprojectsKind = schema.GroupVersionKind{Group: "argoproj.io", Version: "v1alpha1", Kind: "AppProject"} +var appprojectsKind = v1alpha1.SchemeGroupVersion.WithKind("AppProject") // Get takes name of 
the appProject, and returns the corresponding appProject object, and an error if there is any. func (c *FakeAppProjects) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.AppProject, err error) { + emptyResult := &v1alpha1.AppProject{} obj, err := c.Fake. - Invokes(testing.NewGetAction(appprojectsResource, c.ns, name), &v1alpha1.AppProject{}) + Invokes(testing.NewGetActionWithOptions(appprojectsResource, c.ns, name, options), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.AppProject), err } // List takes label and field selectors, and returns the list of AppProjects that match those selectors. func (c *FakeAppProjects) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.AppProjectList, err error) { + emptyResult := &v1alpha1.AppProjectList{} obj, err := c.Fake. - Invokes(testing.NewListAction(appprojectsResource, appprojectsKind, c.ns, opts), &v1alpha1.AppProjectList{}) + Invokes(testing.NewListActionWithOptions(appprojectsResource, appprojectsKind, c.ns, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } label, _, _ := testing.ExtractFromListOptions(opts) @@ -60,28 +61,30 @@ func (c *FakeAppProjects) List(ctx context.Context, opts v1.ListOptions) (result // Watch returns a watch.Interface that watches the requested appProjects. func (c *FakeAppProjects) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { return c.Fake. - InvokesWatch(testing.NewWatchAction(appprojectsResource, c.ns, opts)) + InvokesWatch(testing.NewWatchActionWithOptions(appprojectsResource, c.ns, opts)) } // Create takes the representation of a appProject and creates it. Returns the server's representation of the appProject, and an error, if there is any. 
func (c *FakeAppProjects) Create(ctx context.Context, appProject *v1alpha1.AppProject, opts v1.CreateOptions) (result *v1alpha1.AppProject, err error) { + emptyResult := &v1alpha1.AppProject{} obj, err := c.Fake. - Invokes(testing.NewCreateAction(appprojectsResource, c.ns, appProject), &v1alpha1.AppProject{}) + Invokes(testing.NewCreateActionWithOptions(appprojectsResource, c.ns, appProject, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.AppProject), err } // Update takes the representation of a appProject and updates it. Returns the server's representation of the appProject, and an error, if there is any. func (c *FakeAppProjects) Update(ctx context.Context, appProject *v1alpha1.AppProject, opts v1.UpdateOptions) (result *v1alpha1.AppProject, err error) { + emptyResult := &v1alpha1.AppProject{} obj, err := c.Fake. - Invokes(testing.NewUpdateAction(appprojectsResource, c.ns, appProject), &v1alpha1.AppProject{}) + Invokes(testing.NewUpdateActionWithOptions(appprojectsResource, c.ns, appProject, opts), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.AppProject), err } @@ -96,7 +99,7 @@ func (c *FakeAppProjects) Delete(ctx context.Context, name string, opts v1.Delet // DeleteCollection deletes a collection of objects. func (c *FakeAppProjects) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { - action := testing.NewDeleteCollectionAction(appprojectsResource, c.ns, listOpts) + action := testing.NewDeleteCollectionActionWithOptions(appprojectsResource, c.ns, opts, listOpts) _, err := c.Fake.Invokes(action, &v1alpha1.AppProjectList{}) return err @@ -104,11 +107,12 @@ func (c *FakeAppProjects) DeleteCollection(ctx context.Context, opts v1.DeleteOp // Patch applies the patch and returns the patched appProject. 
func (c *FakeAppProjects) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.AppProject, err error) { + emptyResult := &v1alpha1.AppProject{} obj, err := c.Fake. - Invokes(testing.NewPatchSubresourceAction(appprojectsResource, c.ns, name, pt, data, subresources...), &v1alpha1.AppProject{}) + Invokes(testing.NewPatchSubresourceActionWithOptions(appprojectsResource, c.ns, name, pt, data, opts, subresources...), emptyResult) if obj == nil { - return nil, err + return emptyResult, err } return obj.(*v1alpha1.AppProject), err } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/assets/assets.go b/vendor/github.com/argoproj/argo-cd/v2/util/assets/assets.go new file mode 100644 index 000000000..3b53b226e --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/util/assets/assets.go @@ -0,0 +1,35 @@ +package assets + +import ( + "github.com/argoproj/argo-cd/v2/assets" +) + +var ( + BuiltinPolicyCSV string + ModelConf string + SwaggerJSON string + BadgeSVG string +) + +func init() { + data, err := assets.Embedded.ReadFile("builtin-policy.csv") + if err != nil { + panic(err) + } + BuiltinPolicyCSV = string(data) + data, err = assets.Embedded.ReadFile("model.conf") + if err != nil { + panic(err) + } + ModelConf = string(data) + data, err = assets.Embedded.ReadFile("swagger.json") + if err != nil { + panic(err) + } + SwaggerJSON = string(data) + data, err = assets.Embedded.ReadFile("badge.svg") + if err != nil { + panic(err) + } + BadgeSVG = string(data) +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go b/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go index 5a832fd6c..2c938b699 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/cache/redis.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "net" + "sync" "time" ioutil "github.com/argoproj/argo-cd/v2/util/io" @@ -200,6 +201,11 @@ func (redisHook) 
ProcessPipelineHook(next redis.ProcessPipelineHook) redis.Proce } // CollectMetrics add transport wrapper that pushes metrics into the specified metrics registry -func CollectMetrics(client *redis.Client, registry MetricsRegistry) { +// Lock should be shared between functions that can add/process a Redis hook. +func CollectMetrics(client *redis.Client, registry MetricsRegistry, lock *sync.RWMutex) { + if lock != nil { + lock.Lock() + defer lock.Unlock() + } client.AddHook(&redisHook{registry: registry}) } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/collections/maps.go b/vendor/github.com/argoproj/argo-cd/v2/util/collections/maps.go deleted file mode 100644 index d7a429436..000000000 --- a/vendor/github.com/argoproj/argo-cd/v2/util/collections/maps.go +++ /dev/null @@ -1,36 +0,0 @@ -package collections - -import "reflect" - -// CopyStringMap creates copy of a string map -func CopyStringMap(in map[string]string) map[string]string { - out := map[string]string{} - for k, v := range in { - out[k] = v - } - return out -} - -// StringMapsEqual compares two string maps assuming that nil and empty map are considered equal -func StringMapsEqual(first map[string]string, second map[string]string) bool { - if first == nil { - first = map[string]string{} - } - if second == nil { - second = map[string]string{} - } - return reflect.DeepEqual(first, second) -} - -func MergeStringMaps(items ...map[string]string) map[string]string { - res := make(map[string]string) - for _, m := range items { - if m == nil { - continue - } - for k, v := range m { - res[k] = v - } - } - return res -} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/config/env.go b/vendor/github.com/argoproj/argo-cd/v2/util/config/env.go index d2007fba6..f5576e649 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/config/env.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/config/env.go @@ -14,13 +14,13 @@ import ( var flags map[string]string func init() { - err := loadFlags() + err := 
LoadFlags() if err != nil { log.Fatal(err) } } -func loadFlags() error { +func LoadFlags() error { flags = make(map[string]string) opts, err := shellquote.Split(os.Getenv("ARGOCD_OPTS")) diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go b/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go index e9c2ff41d..686ef8089 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/env/env.go @@ -7,8 +7,6 @@ import ( "strings" "time" - timeutil "github.com/argoproj/pkg/time" - log "github.com/sirupsen/logrus" ) @@ -133,13 +131,12 @@ func ParseDurationFromEnv(env string, defaultValue, min, max time.Duration) time if str == "" { return defaultValue } - durPtr, err := timeutil.ParseDuration(str) + dur, err := time.ParseDuration(str) if err != nil { log.Warnf("Could not parse '%s' as a duration string from environment %s", str, env) return defaultValue } - dur := *durPtr if dur < min { log.Warnf("Value in %s is %s, which is less than minimum %s allowed", env, dur, min) return defaultValue diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/exec/exec.go b/vendor/github.com/argoproj/argo-cd/v2/util/exec/exec.go index 493d8855c..17eab41a2 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/exec/exec.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/exec/exec.go @@ -52,7 +52,7 @@ func RunWithRedactor(cmd *exec.Cmd, redactor func(text string) string) (string, func RunWithExecRunOpts(cmd *exec.Cmd, opts ExecRunOpts) (string, error) { cmdOpts := argoexec.CmdOpts{Timeout: timeout, Redactor: opts.Redactor, TimeoutBehavior: opts.TimeoutBehavior, SkipErrorLogging: opts.SkipErrorLogging} span := tracing.NewLoggingTracer(log.NewLogrusLogger(log.NewWithCurrentConfig())).StartSpan(fmt.Sprintf("exec %v", cmd.Args[0])) - span.SetBaggageItem("dir", fmt.Sprintf("%v", cmd.Dir)) + span.SetBaggageItem("dir", cmd.Dir) if cmdOpts.Redactor != nil { span.SetBaggageItem("args", opts.Redactor(fmt.Sprintf("%v", 
cmd.Args))) } else { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go index 80ec70606..0b9ab11b7 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/client.go @@ -69,7 +69,7 @@ type Client interface { Init() error Fetch(revision string) error Submodule() error - Checkout(revision string, submoduleEnabled bool) error + Checkout(revision string, submoduleEnabled bool) (string, error) LsRefs() (*Refs, error) LsRemote(revision string) (string, error) LsFiles(path string, enableNewGitFileGlobbing bool) ([]string, error) @@ -80,11 +80,23 @@ type Client interface { IsAnnotatedTag(string) bool ChangedFiles(revision string, targetRevision string) ([]string, error) IsRevisionPresent(revision string) bool + // SetAuthor sets the author name and email in the git configuration. + SetAuthor(name, email string) (string, error) + // CheckoutOrOrphan checks out the branch. If the branch does not exist, it creates an orphan branch. + CheckoutOrOrphan(branch string, submoduleEnabled bool) (string, error) + // CheckoutOrNew checks out the given branch. If the branch does not exist, it creates an empty branch based on + // the base branch. + CheckoutOrNew(branch, base string, submoduleEnabled bool) (string, error) + // RemoveContents removes all files from the git repository. + RemoveContents() (string, error) + // CommitAndPush commits and pushes changes to the target branch. 
+ CommitAndPush(branch, message string) (string, error) } type EventHandlers struct { OnLsRemote func(repo string) func() OnFetch func(repo string) func() + OnPush func(repo string) func() } // nativeGitClient implements Client interface using git CLI @@ -459,43 +471,43 @@ func (m *nativeGitClient) Submodule() error { return nil } -// Checkout checkout specified revision -func (m *nativeGitClient) Checkout(revision string, submoduleEnabled bool) error { +// Checkout checks out the specified revision +func (m *nativeGitClient) Checkout(revision string, submoduleEnabled bool) (string, error) { if revision == "" || revision == "HEAD" { revision = "origin/HEAD" } - if _, err := m.runCmd("checkout", "--force", revision); err != nil { - return err + if out, err := m.runCmd("checkout", "--force", revision); err != nil { + return out, fmt.Errorf("failed to checkout %s: %w", revision, err) } // We must populate LFS content by using lfs checkout, if we have at least // one LFS reference in the current revision. if m.IsLFSEnabled() { if largeFiles, err := m.LsLargeFiles(); err == nil { if len(largeFiles) > 0 { - if _, err := m.runCmd("lfs", "checkout"); err != nil { - return err + if out, err := m.runCmd("lfs", "checkout"); err != nil { + return out, fmt.Errorf("failed to checkout LFS files: %w", err) } } } else { - return err + return "", fmt.Errorf("failed to list LFS files: %w", err) } } if _, err := os.Stat(m.root + "/.gitmodules"); !os.IsNotExist(err) { if submoduleEnabled { if err := m.Submodule(); err != nil { - return err + return "", fmt.Errorf("failed to update submodules: %w", err) } } } // NOTE // The double “f” in the arguments is not a typo: the first “f” tells // `git clean` to delete untracked files and directories, and the second “f” - // tells it to clean untractked nested Git repositories (for example a + // tells it to clean untracked nested Git repositories (for example a // submodule which has since been removed). 
- if _, err := m.runCmd("clean", "-ffdx"); err != nil { - return err + if out, err := m.runCmd("clean", "-ffdx"); err != nil { + return out, fmt.Errorf("failed to clean: %w", err) } - return nil + return "", nil } func (m *nativeGitClient) getRefs() ([]*plumbing.Reference, error) { @@ -624,7 +636,7 @@ func (m *nativeGitClient) lsRemote(revision string) (string, error) { refs, err := m.getRefs() if err != nil { - return "", err + return "", fmt.Errorf("failed to list refs: %w", err) } if revision == "" { @@ -746,11 +758,11 @@ func (m *nativeGitClient) CommitSHA() (string, error) { // returns the meta-data for the commit func (m *nativeGitClient) RevisionMetadata(revision string) (*RevisionMetadata, error) { - out, err := m.runCmd("show", "-s", "--format=%an <%ae>|%at|%B", revision) + out, err := m.runCmd("show", "-s", "--format=%an <%ae>%n%at%n%B", revision) if err != nil { return nil, err } - segments := strings.SplitN(out, "|", 3) + segments := strings.SplitN(out, "\n", 3) if len(segments) != 3 { return nil, fmt.Errorf("expected 3 segments, got %v", segments) } @@ -811,6 +823,123 @@ func (m *nativeGitClient) ChangedFiles(revision string, targetRevision string) ( return files, nil } +// config runs a git config command. +func (m *nativeGitClient) config(args ...string) (string, error) { + args = append([]string{"config"}, args...) + out, err := m.runCmd(args...) + if err != nil { + return out, fmt.Errorf("failed to run git config: %w", err) + } + return out, nil +} + +// SetAuthor sets the author name and email in the git configuration. +func (m *nativeGitClient) SetAuthor(name, email string) (string, error) { + if name != "" { + out, err := m.config("--local", "user.name", name) + if err != nil { + return out, err + } + } + if email != "" { + out, err := m.config("--local", "user.email", email) + if err != nil { + return out, err + } + } + return "", nil +} + +// CheckoutOrOrphan checks out the branch. If the branch does not exist, it creates an orphan branch. 
+func (m *nativeGitClient) CheckoutOrOrphan(branch string, submoduleEnabled bool) (string, error) { + out, err := m.Checkout(branch, submoduleEnabled) + if err != nil { + // If the branch doesn't exist, create it as an orphan branch. + if strings.Contains(err.Error(), "did not match any file(s) known to git") { + out, err = m.runCmd("switch", "--orphan", branch) + if err != nil { + return out, fmt.Errorf("failed to create orphan branch: %w", err) + } + } else { + return out, fmt.Errorf("failed to checkout branch: %w", err) + } + + // Make an empty initial commit. + out, err = m.runCmd("commit", "--allow-empty", "-m", "Initial commit") + if err != nil { + return out, fmt.Errorf("failed to commit initial commit: %w", err) + } + + // Push the commit. + err = m.runCredentialedCmd("push", "origin", branch) + if err != nil { + return "", fmt.Errorf("failed to push to branch: %w", err) + } + } + return "", nil +} + +// CheckoutOrNew checks out the given branch. If the branch does not exist, it creates an empty branch based on +// the base branch. +func (m *nativeGitClient) CheckoutOrNew(branch, base string, submoduleEnabled bool) (string, error) { + out, err := m.Checkout(branch, submoduleEnabled) + if err != nil { + if strings.Contains(err.Error(), "did not match any file(s) known to git") { + // If the branch does not exist, create any empty branch based on the sync branch + // First, checkout the sync branch. + out, err = m.Checkout(base, submoduleEnabled) + if err != nil { + return out, fmt.Errorf("failed to checkout sync branch: %w", err) + } + + out, err = m.runCmd("checkout", "-b", branch) + if err != nil { + return out, fmt.Errorf("failed to create branch: %w", err) + } + } else { + return out, fmt.Errorf("failed to checkout branch: %w", err) + } + } + return "", nil +} + +// RemoveContents removes all files from the git repository. 
+func (m *nativeGitClient) RemoveContents() (string, error) { + out, err := m.runCmd("rm", "-r", "--ignore-unmatch", ".") + if err != nil { + return out, fmt.Errorf("failed to clear repo contents: %w", err) + } + return "", nil +} + +// CommitAndPush commits and pushes changes to the target branch. +func (m *nativeGitClient) CommitAndPush(branch, message string) (string, error) { + out, err := m.runCmd("add", ".") + if err != nil { + return out, fmt.Errorf("failed to add files: %w", err) + } + + out, err = m.runCmd("commit", "-m", message) + if err != nil { + if strings.Contains(out, "nothing to commit, working tree clean") { + return out, nil + } + return out, fmt.Errorf("failed to commit: %w", err) + } + + if m.OnPush != nil { + done := m.OnPush(m.repoURL) + defer done() + } + + err = m.runCredentialedCmd("push", "origin", branch) + if err != nil { + return "", fmt.Errorf("failed to push: %w", err) + } + + return "", nil +} + // runWrapper runs a custom command with all the semantics of running the Git client func (m *nativeGitClient) runGnuPGWrapper(wrapper string, args ...string) (string, error) { cmd := exec.Command(wrapper, args...) @@ -850,7 +979,7 @@ func (m *nativeGitClient) runCredentialedCmd(args ...string) error { func (m *nativeGitClient) runCmdOutput(cmd *exec.Cmd, ropts runOpts) (string, error) { cmd.Dir = m.root cmd.Env = append(os.Environ(), cmd.Env...) - // Set $HOME to nowhere, so we can be execute Git regardless of any external + // Set $HOME to nowhere, so we can execute Git regardless of any external // authentication keys (e.g. in ~/.ssh) -- this is especially important for // running tests on local machines and/or CircleCI. 
cmd.Env = append(cmd.Env, "HOME=/dev/null") diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/creds.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/creds.go index 5715925dc..9f3675cac 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/creds.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/creds.go @@ -4,16 +4,20 @@ import ( "context" "crypto/sha256" "encoding/base64" + "encoding/hex" "encoding/json" "errors" "fmt" "io" + "net/http" "net/url" "os" "strconv" "strings" "time" + "github.com/google/go-github/v66/github" + "golang.org/x/oauth2" "golang.org/x/oauth2/google" @@ -77,6 +81,8 @@ type CredsStore interface { type Creds interface { Environ() (io.Closer, []string, error) + // GetUserInfo gets the username and email address for the credentials, if they're available. + GetUserInfo(ctx context.Context) (string, string, error) } // nop implementation @@ -94,16 +100,24 @@ func (c NopCreds) Environ() (io.Closer, []string, error) { return NopCloser{}, nil, nil } +// GetUserInfo returns empty strings for user info +func (c NopCreds) GetUserInfo(ctx context.Context) (name string, email string, err error) { + return "", "", nil +} + var _ io.Closer = NopCloser{} type GenericHTTPSCreds interface { HasClientCert() bool GetClientCertData() string GetClientCertKey() string - Environ() (io.Closer, []string, error) + Creds } -var _ GenericHTTPSCreds = HTTPSCreds{} +var ( + _ GenericHTTPSCreds = HTTPSCreds{} + _ Creds = HTTPSCreds{} +) // HTTPS creds implementation type HTTPSCreds struct { @@ -141,6 +155,12 @@ func NewHTTPSCreds(username string, password string, clientCertData string, clie } } +// GetUserInfo returns the username and email address for the credentials, if they're available. +func (c HTTPSCreds) GetUserInfo(ctx context.Context) (string, string, error) { + // Email not implemented for HTTPS creds. 
+ return c.username, "", nil +} + func (c HTTPSCreds) BasicAuthHeader() string { h := "Authorization: Basic " t := c.username + ":" + c.password @@ -231,6 +251,8 @@ func (c HTTPSCreds) GetClientCertKey() string { return c.clientCertKey } +var _ Creds = SSHCreds{} + // SSH implementation type SSHCreds struct { sshPrivateKey string @@ -245,6 +267,13 @@ func NewSSHCreds(sshPrivateKey string, caPath string, insecureIgnoreHostKey bool return SSHCreds{sshPrivateKey, caPath, insecureIgnoreHostKey, store, proxy, noProxy} } +// GetUserInfo returns empty strings for user info. +// TODO: Implement this method to return the username and email address for the credentials, if they're available. +func (c SSHCreds) GetUserInfo(ctx context.Context) (string, string, error) { + // User info not implemented for SSH creds. + return "", "", nil +} + type sshPrivateKeyFile string type authFilePaths []string @@ -414,6 +443,37 @@ func (g GitHubAppCreds) Environ() (io.Closer, []string, error) { }), env, nil } +// GetUserInfo returns the username and email address for the credentials, if they're available. +func (g GitHubAppCreds) GetUserInfo(ctx context.Context) (string, string, error) { + // We use the apps transport to get the app slug. + appTransport, err := g.getAppTransport() + if err != nil { + return "", "", fmt.Errorf("failed to create GitHub app transport: %w", err) + } + appClient := github.NewClient(&http.Client{Transport: appTransport}) + app, _, err := appClient.Apps.Get(ctx, "") + if err != nil { + return "", "", fmt.Errorf("failed to get app info: %w", err) + } + + // Then we use the installation transport to get the installation info. 
+ appInstallTransport, err := g.getInstallationTransport() + if err != nil { + return "", "", fmt.Errorf("failed to get app installation: %w", err) + } + httpClient := http.Client{Transport: appInstallTransport} + client := github.NewClient(&httpClient) + + appLogin := fmt.Sprintf("%s[bot]", app.GetSlug()) + user, _, err := client.Users.Get(ctx, appLogin) + if err != nil { + return "", "", fmt.Errorf("failed to get app user info: %w", err) + } + authorName := user.GetLogin() + authorEmail := fmt.Sprintf("%d+%s@users.noreply.github.com", user.GetID(), user.GetLogin()) + return authorName, authorEmail, nil +} + // getAccessToken fetches GitHub token using the app id, install id, and private key. // the token is then cached for re-use. func (g GitHubAppCreds) getAccessToken() (string, error) { @@ -421,20 +481,53 @@ func (g GitHubAppCreds) getAccessToken() (string, error) { ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) defer cancel() + itr, err := g.getInstallationTransport() + if err != nil { + return "", fmt.Errorf("failed to create GitHub app installation transport: %w", err) + } + + return itr.Token(ctx) +} + +// getAppTransport creates a new GitHub transport for the app +func (g GitHubAppCreds) getAppTransport() (*ghinstallation.AppsTransport, error) { + // GitHub API url + baseUrl := "https://api.github.com" + if g.baseURL != "" { + baseUrl = strings.TrimSuffix(g.baseURL, "/") + } + + // Create a new GitHub transport + c := GetRepoHTTPClient(baseUrl, g.insecure, g, g.proxy, g.noProxy) + itr, err := ghinstallation.NewAppsTransport(c.Transport, + g.appID, + []byte(g.privateKey), + ) + if err != nil { + return nil, fmt.Errorf("failed to initialize GitHub installation transport: %w", err) + } + + itr.BaseURL = baseUrl + + return itr, nil +} + +// getInstallationTransport creates a new GitHub transport for the app installation +func (g GitHubAppCreds) getInstallationTransport() (*ghinstallation.Transport, error) { // Compute hash of creds 
for lookup in cache h := sha256.New() _, err := h.Write([]byte(fmt.Sprintf("%s %d %d %s", g.privateKey, g.appID, g.appInstallId, g.baseURL))) if err != nil { - return "", err + return nil, fmt.Errorf("failed to get get SHA256 hash for GitHub app credentials: %w", err) } - key := fmt.Sprintf("%x", h.Sum(nil)) + key := hex.EncodeToString(h.Sum(nil)) // Check cache for GitHub transport which helps fetch an API token t, found := githubAppTokenCache.Get(key) if found { itr := t.(*ghinstallation.Transport) // This method caches the token and if it's expired retrieves a new one - return itr.Token(ctx) + return itr, nil } // GitHub API url @@ -451,7 +544,7 @@ func (g GitHubAppCreds) getAccessToken() (string, error) { []byte(g.privateKey), ) if err != nil { - return "", err + return nil, fmt.Errorf("failed to initialize GitHub installation transport: %w", err) } itr.BaseURL = baseUrl @@ -459,7 +552,7 @@ func (g GitHubAppCreds) getAccessToken() (string, error) { // Add transport to cache githubAppTokenCache.Set(key, itr, time.Minute*60) - return itr.Token(ctx) + return itr, nil } func (g GitHubAppCreds) HasClientCert() bool { @@ -474,6 +567,8 @@ func (g GitHubAppCreds) GetClientCertKey() string { return g.clientCertKey } +var _ Creds = GoogleCloudCreds{} + // GoogleCloudCreds to authenticate to Google Cloud Source repositories type GoogleCloudCreds struct { creds *google.Credentials @@ -489,6 +584,16 @@ func NewGoogleCloudCreds(jsonData string, store CredsStore) GoogleCloudCreds { return GoogleCloudCreds{creds, store} } +// GetUserInfo returns the username and email address for the credentials, if they're available. +// TODO: implement getting email instead of just username. 
+func (c GoogleCloudCreds) GetUserInfo(ctx context.Context) (string, string, error) { + username, err := c.getUsername() + if err != nil { + return "", "", fmt.Errorf("failed to get username from creds: %w", err) + } + return username, "", nil +} + func (c GoogleCloudCreds) Environ() (io.Closer, []string, error) { username, err := c.getUsername() if err != nil { @@ -543,7 +648,7 @@ func (c GoogleCloudCreds) getAccessToken() (string, error) { if err != nil { return "", err } - key := fmt.Sprintf("%x", h.Sum(nil)) + key := hex.EncodeToString(h.Sum(nil)) t, found := googleCloudTokenSource.Get(key) if found { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go index 0c81791cf..ea2f310b0 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/git.go @@ -1,6 +1,7 @@ package git import ( + "fmt" "net/url" "regexp" "strings" @@ -94,10 +95,13 @@ func IsHTTPURL(url string) bool { // TestRepo tests if a repo exists and is accessible with the given credentials func TestRepo(repo string, creds Creds, insecure bool, enableLfs bool, proxy string, noProxy string) error { - clnt, err := NewClient(repo, creds, insecure, enableLfs, proxy, noProxy) + client, err := NewClient(repo, creds, insecure, enableLfs, proxy, noProxy) if err != nil { - return err + return fmt.Errorf("unable to initialize git client: %w", err) } - _, err = clnt.LsRemote("HEAD") - return err + _, err = client.LsRemote("HEAD") + if err != nil { + return fmt.Errorf("unable to ls-remote HEAD on repository: %w", err) + } + return nil } diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/git/workaround.go b/vendor/github.com/argoproj/argo-cd/v2/util/git/workaround.go index 49cdc5634..63f69f81c 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/git/workaround.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/git/workaround.go @@ -85,6 +85,12 @@ func listRemote(r *git.Remote, o 
*git.ListOptions, insecure bool, creds Creds, p var resultRefs []*plumbing.Reference _ = refs.ForEach(func(ref *plumbing.Reference) error { + if ref.Name().IsTag() { + if peeled, ok := ar.Peeled[ref.Name().String()]; ok { + resultRefs = append(resultRefs, plumbing.NewHashReference(ref.Name(), peeled)) + return nil + } + } resultRefs = append(resultRefs, ref) return nil }) diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go index 4f6e2ed37..d9972adb0 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/client.go @@ -25,6 +25,7 @@ import ( "gopkg.in/yaml.v2" "oras.land/oras-go/v2/registry/remote" "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials" "github.com/argoproj/argo-cd/v2/util/cache" argoio "github.com/argoproj/argo-cd/v2/util/io" @@ -163,6 +164,7 @@ func (c *nativeHelmChart) ExtractChart(chart string, version string, project str cachedChartPath, err := c.getCachedChartPath(chart, version, project) if err != nil { + _ = os.RemoveAll(tempDir) return "", nil, fmt.Errorf("error getting cached chart path: %w", err) } @@ -172,6 +174,7 @@ func (c *nativeHelmChart) ExtractChart(chart string, version string, project str // check if chart tar is already downloaded exists, err := fileExist(cachedChartPath) if err != nil { + _ = os.RemoveAll(tempDir) return "", nil, fmt.Errorf("error checking existence of cached chart path: %w", err) } @@ -179,6 +182,7 @@ func (c *nativeHelmChart) ExtractChart(chart string, version string, project str // create empty temp directory to extract chart from the registry tempDest, err := files.CreateTempDir(os.TempDir()) if err != nil { + _ = os.RemoveAll(tempDir) return "", nil, fmt.Errorf("error creating temporary destination directory: %w", err) } defer func() { _ = os.RemoveAll(tempDest) }() @@ -187,6 +191,7 @@ func (c *nativeHelmChart) 
ExtractChart(chart string, version string, project str if c.creds.Password != "" && c.creds.Username != "" { _, err = helmCmd.RegistryLogin(c.repoURL, c.creds) if err != nil { + _ = os.RemoveAll(tempDir) return "", nil, fmt.Errorf("error logging into OCI registry: %w", err) } @@ -198,11 +203,13 @@ func (c *nativeHelmChart) ExtractChart(chart string, version string, project str // 'helm pull' ensures that chart is downloaded into temp directory _, err = helmCmd.PullOCI(c.repoURL, chart, version, tempDest, c.creds) if err != nil { + _ = os.RemoveAll(tempDir) return "", nil, fmt.Errorf("error pulling OCI chart: %w", err) } } else { _, err = helmCmd.Fetch(c.repoURL, chart, version, tempDest, c.creds, passCredentials) if err != nil { + _ = os.RemoveAll(tempDir) return "", nil, fmt.Errorf("error fetching chart: %w", err) } } @@ -441,13 +448,23 @@ func (c *nativeHelmChart) GetTags(chart string, noCache bool) (*TagsList, error) }} repoHost, _, _ := strings.Cut(tagsURL, "/") + credential := auth.StaticCredential(repoHost, auth.Credential{ + Username: c.creds.Username, + Password: c.creds.Password, + }) + + // Try to fallback to the environment config, but we shouldn't error if the file is not set + if c.creds.Username == "" && c.creds.Password == "" { + store, _ := credentials.NewStoreFromDocker(credentials.StoreOptions{}) + if store != nil { + credential = credentials.Credential(store) + } + } + repo.Client = &auth.Client{ - Client: client, - Cache: nil, - Credential: auth.StaticCredential(repoHost, auth.Credential{ - Username: c.creds.Username, - Password: c.creds.Password, - }), + Client: client, + Cache: nil, + Credential: credential, } ctx := context.Background() diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go b/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go index 6b0e30ed2..28b458fa5 100644 --- a/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go +++ b/vendor/github.com/argoproj/argo-cd/v2/util/helm/cmd.go @@ -338,23 +338,42 @@ type 
TemplateOpts struct { Values []pathutil.ResolvedFilePath // ExtraValues is the randomly-generated path to the temporary values file holding the contents of // spec.source.helm.values/valuesObject. - ExtraValues pathutil.ResolvedFilePath - SkipCrds bool + ExtraValues pathutil.ResolvedFilePath + SkipCrds bool + SkipSchemaValidation bool + SkipTests bool } -var ( - re = regexp.MustCompile(`([^\\]),`) - apiVersionsRemover = regexp.MustCompile(`(--api-versions [^ ]+ )+`) -) - func cleanSetParameters(val string) string { // `{}` equal helm list parameters format, so don't escape `,`. if strings.HasPrefix(val, `{`) && strings.HasSuffix(val, `}`) { return val } - return re.ReplaceAllString(val, `$1\,`) + + val = replaceAllWithLookbehind(val, ',', `\,`, '\\') + return val } +func replaceAllWithLookbehind(val string, old rune, new string, lookbehind rune) string { + var result strings.Builder + var prevR rune + for _, r := range val { + if r == old { + if prevR != lookbehind { + result.WriteString(new) + } else { + result.WriteRune(old) + } + } else { + result.WriteRune(r) + } + prevR = r + } + return result.String() +} + +var apiVersionsRemover = regexp.MustCompile(`(--api-versions [^ ]+ )+`) + func (c *Cmd) template(chartPath string, opts *TemplateOpts) (string, string, error) { if callback, err := cleanupChartLockFile(filepath.Clean(path.Join(c.WorkDir, chartPath))); err == nil { defer callback() @@ -391,6 +410,12 @@ func (c *Cmd) template(chartPath string, opts *TemplateOpts) (string, string, er if !opts.SkipCrds { args = append(args, "--include-crds") } + if opts.SkipSchemaValidation { + args = append(args, "--skip-schema-validation") + } + if opts.SkipTests { + args = append(args, "--skip-tests") + } out, command, err := c.run(args...) 
if err != nil { diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/secure_mkdir_default.go b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/secure_mkdir_default.go new file mode 100644 index 000000000..fe7733e2d --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/secure_mkdir_default.go @@ -0,0 +1,25 @@ +//go:build !linux + +package files + +import ( + "fmt" + "os" + + securejoin "github.com/cyphar/filepath-securejoin" +) + +// SecureMkdirAll creates a directory with the given mode and returns the full path to the directory. It prevents +// directory traversal attacks by ensuring the path is within the root directory. The path is constructed as if the +// given root is the root of the filesystem. So anything traversing outside the root is simply removed from the path. +func SecureMkdirAll(root, unsafePath string, mode os.FileMode) (string, error) { + fullPath, err := securejoin.SecureJoin(root, unsafePath) + if err != nil { + return "", fmt.Errorf("failed to construct secure path: %w", err) + } + err = os.MkdirAll(fullPath, mode) + if err != nil { + return "", fmt.Errorf("failed to create directory: %w", err) + } + return fullPath, nil +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/io/files/secure_mkdir_linux.go b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/secure_mkdir_linux.go new file mode 100644 index 000000000..14f727dda --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/util/io/files/secure_mkdir_linux.go @@ -0,0 +1,25 @@ +//go:build linux + +package files + +import ( + "fmt" + "os" + + securejoin "github.com/cyphar/filepath-securejoin" +) + +// SecureMkdirAll creates a directory with the given mode and returns the full path to the directory. It prevents +// directory traversal attacks by ensuring the path is within the root directory. The path is constructed as if the +// given root is the root of the filesystem. So anything traversing outside the root is simply removed from the path. 
+func SecureMkdirAll(root, unsafePath string, mode os.FileMode) (string, error) { + err := securejoin.MkdirAll(root, unsafePath, int(mode)) + if err != nil { + return "", fmt.Errorf("failed to make directory: %w", err) + } + fullPath, err := securejoin.SecureJoin(root, unsafePath) + if err != nil { + return "", fmt.Errorf("failed to construct secure path: %w", err) + } + return fullPath, nil +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/jwt/jwt.go b/vendor/github.com/argoproj/argo-cd/v2/util/jwt/jwt.go new file mode 100644 index 000000000..585025990 --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/util/jwt/jwt.go @@ -0,0 +1,141 @@ +package jwt + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + jwtgo "github.com/golang-jwt/jwt/v4" +) + +// MapClaims converts a jwt.Claims to a MapClaims +func MapClaims(claims jwtgo.Claims) (jwtgo.MapClaims, error) { + if mapClaims, ok := claims.(*jwtgo.MapClaims); ok { + return *mapClaims, nil + } + claimsBytes, err := json.Marshal(claims) + if err != nil { + return nil, err + } + var mapClaims jwtgo.MapClaims + err = json.Unmarshal(claimsBytes, &mapClaims) + if err != nil { + return nil, err + } + return mapClaims, nil +} + +// StringField extracts a field from the claims as a string +func StringField(claims jwtgo.MapClaims, fieldName string) string { + if fieldIf, ok := claims[fieldName]; ok { + if field, ok := fieldIf.(string); ok { + return field + } + } + return "" +} + +// Float64Field extracts a field from the claims as a float64 +func Float64Field(claims jwtgo.MapClaims, fieldName string) float64 { + if fieldIf, ok := claims[fieldName]; ok { + if field, ok := fieldIf.(float64); ok { + return field + } + } + return 0 +} + +// GetScopeValues extracts the values of specified scopes from the claims +func GetScopeValues(claims jwtgo.MapClaims, scopes []string) []string { + groups := make([]string, 0) + for i := range scopes { + scopeIf, ok := claims[scopes[i]] + if !ok { + continue + } + + switch 
val := scopeIf.(type) { + case []interface{}: + for _, groupIf := range val { + group, ok := groupIf.(string) + if ok { + groups = append(groups, group) + } + } + case []string: + groups = append(groups, val...) + case string: + groups = append(groups, val) + } + } + + return groups +} + +func numField(m jwtgo.MapClaims, key string) (int64, error) { + field, ok := m[key] + if !ok { + return 0, fmt.Errorf("token does not have %s claim", key) + } + switch val := field.(type) { + case float64: + return int64(val), nil + case json.Number: + return val.Int64() + case int64: + return val, nil + default: + return 0, fmt.Errorf("%s '%v' is not a number", key, val) + } +} + +// IssuedAt returns the issued at as an int64 +func IssuedAt(m jwtgo.MapClaims) (int64, error) { + return numField(m, "iat") +} + +// IssuedAtTime returns the issued at as a time.Time +func IssuedAtTime(m jwtgo.MapClaims) (time.Time, error) { + iat, err := IssuedAt(m) + return time.Unix(iat, 0), err +} + +// ExpirationTime returns the expiration as a time.Time +func ExpirationTime(m jwtgo.MapClaims) (time.Time, error) { + exp, err := numField(m, "exp") + return time.Unix(exp, 0), err +} + +func Claims(in interface{}) jwtgo.Claims { + claims, ok := in.(jwtgo.Claims) + if ok { + return claims + } + return nil +} + +// IsMember returns whether or not the user's claims is a member of any of the groups +func IsMember(claims jwtgo.Claims, groups []string, scopes []string) bool { + mapClaims, err := MapClaims(claims) + if err != nil { + return false + } + // O(n^2) loop + for _, userGroup := range GetGroups(mapClaims, scopes) { + for _, group := range groups { + if userGroup == group { + return true + } + } + } + return false +} + +func GetGroups(mapClaims jwtgo.MapClaims, scopes []string) []string { + return GetScopeValues(mapClaims, scopes) +} + +func IsValid(token string) bool { + return len(strings.SplitN(token, ".", 3)) == 3 +} diff --git a/vendor/github.com/argoproj/argo-cd/v2/util/rbac/rbac.go 
b/vendor/github.com/argoproj/argo-cd/v2/util/rbac/rbac.go new file mode 100644 index 000000000..14bc91bd0 --- /dev/null +++ b/vendor/github.com/argoproj/argo-cd/v2/util/rbac/rbac.go @@ -0,0 +1,601 @@ +package rbac + +import ( + "context" + "encoding/csv" + "errors" + "fmt" + "sort" + "strings" + "sync" + "time" + + "github.com/argoproj/argo-cd/v2/util/assets" + "github.com/argoproj/argo-cd/v2/util/glob" + jwtutil "github.com/argoproj/argo-cd/v2/util/jwt" + + "github.com/casbin/casbin/v2" + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/util" + "github.com/casbin/govaluate" + "github.com/golang-jwt/jwt/v4" + gocache "github.com/patrickmn/go-cache" + log "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + apiv1 "k8s.io/api/core/v1" + apierr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + v1 "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" +) + +const ( + ConfigMapPolicyCSVKey = "policy.csv" + ConfigMapPolicyDefaultKey = "policy.default" + ConfigMapScopesKey = "scopes" + ConfigMapMatchModeKey = "policy.matchMode" + GlobMatchMode = "glob" + RegexMatchMode = "regex" + + defaultRBACSyncPeriod = 10 * time.Minute +) + +// CasbinEnforcer represents methods that must be implemented by a Casbin enforces +type CasbinEnforcer interface { + EnableLog(bool) + Enforce(rvals ...interface{}) (bool, error) + LoadPolicy() error + EnableEnforce(bool) + AddFunction(name string, function govaluate.ExpressionFunction) + GetGroupingPolicy() ([][]string, error) +} + +const ( + // please add new items to Resources + ResourceClusters = "clusters" + ResourceProjects = "projects" + ResourceApplications = "applications" + ResourceApplicationSets = "applicationsets" + ResourceRepositories = "repositories" + ResourceWriteRepositories = "write-repositories" + ResourceCertificates = "certificates" + 
ResourceAccounts = "accounts" + ResourceGPGKeys = "gpgkeys" + ResourceLogs = "logs" + ResourceExec = "exec" + ResourceExtensions = "extensions" + + // please add new items to Actions + ActionGet = "get" + ActionCreate = "create" + ActionUpdate = "update" + ActionDelete = "delete" + ActionSync = "sync" + ActionOverride = "override" + ActionAction = "action" + ActionInvoke = "invoke" +) + +var ( + DefaultScopes = []string{"groups"} + Resources = []string{ + ResourceClusters, + ResourceProjects, + ResourceApplications, + ResourceApplicationSets, + ResourceRepositories, + ResourceWriteRepositories, + ResourceCertificates, + ResourceAccounts, + ResourceGPGKeys, + ResourceLogs, + ResourceExec, + ResourceExtensions, + } + Actions = []string{ + ActionGet, + ActionCreate, + ActionUpdate, + ActionDelete, + ActionSync, + ActionOverride, + ActionAction, + ActionInvoke, + } +) + +var ProjectScoped = map[string]bool{ + ResourceApplications: true, + ResourceApplicationSets: true, + ResourceLogs: true, + ResourceExec: true, + ResourceClusters: true, + ResourceRepositories: true, +} + +// Enforcer is a wrapper around an Casbin enforcer that: +// * is backed by a kubernetes config map +// * has a predefined RBAC model +// * supports a built-in policy +// * supports a user-defined policy +// * supports a custom JWT claims enforce function +type Enforcer struct { + lock sync.Mutex + enforcerCache *gocache.Cache + adapter *argocdAdapter + enableLog bool + enabled bool + clientset kubernetes.Interface + namespace string + configmap string + claimsEnforcerFunc ClaimsEnforcerFunc + model model.Model + defaultRole string + matchMode string +} + +// cachedEnforcer holds the Casbin enforcer instances and optional custom project policy +type cachedEnforcer struct { + enforcer CasbinEnforcer + policy string +} + +func (e *Enforcer) invalidateCache(actions ...func()) { + e.lock.Lock() + defer e.lock.Unlock() + + for _, action := range actions { + action() + } + e.enforcerCache.Flush() +} + 
+func (e *Enforcer) getCabinEnforcer(project string, policy string) CasbinEnforcer { + res, err := e.tryGetCabinEnforcer(project, policy) + if err != nil { + panic(err) + } + return res +} + +// tryGetCabinEnforcer returns the cached enforcer for the given optional project and project policy. +func (e *Enforcer) tryGetCabinEnforcer(project string, policy string) (CasbinEnforcer, error) { + e.lock.Lock() + defer e.lock.Unlock() + var cached *cachedEnforcer + val, ok := e.enforcerCache.Get(project) + if ok { + if c, ok := val.(*cachedEnforcer); ok && c.policy == policy { + cached = c + } + } + if cached != nil { + return cached.enforcer, nil + } + matchFunc := globMatchFunc + if e.matchMode == RegexMatchMode { + matchFunc = util.RegexMatchFunc + } + + var err error + var enforcer CasbinEnforcer + if policy != "" { + if enforcer, err = newEnforcerSafe(matchFunc, e.model, newAdapter(e.adapter.builtinPolicy, e.adapter.userDefinedPolicy, policy)); err != nil { + // fallback to default policy if project policy is invalid + log.Errorf("Failed to load project '%s' policy", project) + enforcer, err = newEnforcerSafe(matchFunc, e.model, e.adapter) + } + } else { + enforcer, err = newEnforcerSafe(matchFunc, e.model, e.adapter) + } + if err != nil { + return nil, err + } + + enforcer.AddFunction("globOrRegexMatch", matchFunc) + enforcer.EnableLog(e.enableLog) + enforcer.EnableEnforce(e.enabled) + e.enforcerCache.SetDefault(project, &cachedEnforcer{enforcer: enforcer, policy: policy}) + return enforcer, nil +} + +// ClaimsEnforcerFunc is func template to enforce a JWT claims. The subject is replaced +type ClaimsEnforcerFunc func(claims jwt.Claims, rvals ...interface{}) bool + +func newEnforcerSafe(matchFunction govaluate.ExpressionFunction, params ...interface{}) (e CasbinEnforcer, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) + e = nil + } + }() + enfs, err := casbin.NewCachedEnforcer(params...) 
+ if err != nil { + return nil, err + } + enfs.AddFunction("globOrRegexMatch", matchFunction) + return enfs, nil +} + +func NewEnforcer(clientset kubernetes.Interface, namespace, configmap string, claimsEnforcer ClaimsEnforcerFunc) *Enforcer { + adapter := newAdapter("", "", "") + builtInModel := newBuiltInModel() + return &Enforcer{ + enforcerCache: gocache.New(time.Hour, time.Hour), + adapter: adapter, + clientset: clientset, + namespace: namespace, + configmap: configmap, + model: builtInModel, + claimsEnforcerFunc: claimsEnforcer, + enabled: true, + } +} + +// EnableLog executes casbin.Enforcer functionality. +func (e *Enforcer) EnableLog(s bool) { + e.invalidateCache(func() { + e.enableLog = s + }) +} + +// EnableEnforce executes casbin.Enforcer functionality and will invalidate cache if required. +func (e *Enforcer) EnableEnforce(s bool) { + e.invalidateCache(func() { + e.enabled = s + }) +} + +// LoadPolicy executes casbin.Enforcer functionality and will invalidate cache if required. +func (e *Enforcer) LoadPolicy() error { + _, err := e.tryGetCabinEnforcer("", "") + return err +} + +// Glob match func +func globMatchFunc(args ...interface{}) (interface{}, error) { + if len(args) < 2 { + return false, nil + } + val, ok := args[0].(string) + if !ok { + return false, nil + } + + pattern, ok := args[1].(string) + if !ok { + return false, nil + } + + return glob.Match(pattern, val), nil +} + +// SetMatchMode set match mode on runtime, glob match or regex match +func (e *Enforcer) SetMatchMode(mode string) { + e.invalidateCache(func() { + if mode == RegexMatchMode { + e.matchMode = RegexMatchMode + } else { + e.matchMode = GlobMatchMode + } + }) +} + +// SetDefaultRole sets a default role to use during enforcement. Will fall back to this role if +// normal enforcement fails +func (e *Enforcer) SetDefaultRole(roleName string) { + e.defaultRole = roleName +} + +// SetClaimsEnforcerFunc sets a claims enforce function during enforcement. 
The claims enforce function +// can extract claims from JWT token and do the proper enforcement based on user, group or any information +// available in the input parameter list +func (e *Enforcer) SetClaimsEnforcerFunc(claimsEnforcer ClaimsEnforcerFunc) { + e.claimsEnforcerFunc = claimsEnforcer +} + +// Enforce is a wrapper around casbin.Enforce to additionally enforce a default role and a custom +// claims function +func (e *Enforcer) Enforce(rvals ...interface{}) bool { + return enforce(e.getCabinEnforcer("", ""), e.defaultRole, e.claimsEnforcerFunc, rvals...) +} + +// EnforceErr is a convenience helper to wrap a failed enforcement with a detailed error about the request +func (e *Enforcer) EnforceErr(rvals ...interface{}) error { + if !e.Enforce(rvals...) { + errMsg := "permission denied" + if len(rvals) > 0 { + rvalsStrs := make([]string, len(rvals)-1) + for i, rval := range rvals[1:] { + rvalsStrs[i] = fmt.Sprintf("%s", rval) + } + switch s := rvals[0].(type) { + case jwt.Claims: + claims, err := jwtutil.MapClaims(s) + if err != nil { + break + } + if sub := jwtutil.StringField(claims, "sub"); sub != "" { + rvalsStrs = append(rvalsStrs, fmt.Sprintf("sub: %s", sub)) + } + if issuedAtTime, err := jwtutil.IssuedAtTime(claims); err == nil { + rvalsStrs = append(rvalsStrs, fmt.Sprintf("iat: %s", issuedAtTime.Format(time.RFC3339))) + } + } + errMsg = fmt.Sprintf("%s: %s", errMsg, strings.Join(rvalsStrs, ", ")) + } + return status.Error(codes.PermissionDenied, errMsg) + } + return nil +} + +// EnforceRuntimePolicy enforces a policy defined at run-time which augments the built-in and +// user-defined policy. This allows any explicit denies of the built-in, and user-defined policies +// to override the run-time policy. Runs normal enforcement if run-time policy is empty. 
+func (e *Enforcer) EnforceRuntimePolicy(project string, policy string, rvals ...interface{}) bool { + enf := e.CreateEnforcerWithRuntimePolicy(project, policy) + return e.EnforceWithCustomEnforcer(enf, rvals...) +} + +// CreateEnforcerWithRuntimePolicy creates an enforcer with a policy defined at run-time which augments the built-in and +// user-defined policy. This allows any explicit denies of the built-in, and user-defined policies +// to override the run-time policy. Runs normal enforcement if run-time policy is empty. +func (e *Enforcer) CreateEnforcerWithRuntimePolicy(project string, policy string) CasbinEnforcer { + return e.getCabinEnforcer(project, policy) +} + +// EnforceWithCustomEnforcer wraps enforce with a custom enforcer +func (e *Enforcer) EnforceWithCustomEnforcer(enf CasbinEnforcer, rvals ...interface{}) bool { + return enforce(enf, e.defaultRole, e.claimsEnforcerFunc, rvals...) +} + +// enforce is a helper to additionally check a default role and invoke a custom claims enforcement function +func enforce(enf CasbinEnforcer, defaultRole string, claimsEnforcerFunc ClaimsEnforcerFunc, rvals ...interface{}) bool { + // check the default role + if defaultRole != "" && len(rvals) >= 2 { + if ok, err := enf.Enforce(append([]interface{}{defaultRole}, rvals[1:]...)...); ok && err == nil { + return true + } + } + if len(rvals) == 0 { + return false + } + // check if subject is jwt.Claims vs. a normal subject string and run custom claims + // enforcement func (if set) + sub := rvals[0] + switch s := sub.(type) { + case string: + // noop + case jwt.Claims: + if claimsEnforcerFunc != nil && claimsEnforcerFunc(s, rvals...) { + return true + } + rvals = append([]interface{}{""}, rvals[1:]...) + default: + rvals = append([]interface{}{""}, rvals[1:]...) + } + ok, err := enf.Enforce(rvals...) 
+ return ok && err == nil +} + +// SetBuiltinPolicy sets a built-in policy, which augments any user defined policies +func (e *Enforcer) SetBuiltinPolicy(policy string) error { + e.invalidateCache(func() { + e.adapter.builtinPolicy = policy + }) + return e.LoadPolicy() +} + +// SetUserPolicy sets a user policy, augmenting the built-in policy +func (e *Enforcer) SetUserPolicy(policy string) error { + e.invalidateCache(func() { + e.adapter.userDefinedPolicy = policy + }) + return e.LoadPolicy() +} + +// newInformers returns an informer which watches updates on the rbac configmap +func (e *Enforcer) newInformer() cache.SharedIndexInformer { + tweakConfigMap := func(options *metav1.ListOptions) { + cmFieldSelector := fields.ParseSelectorOrDie(fmt.Sprintf("metadata.name=%s", e.configmap)) + options.FieldSelector = cmFieldSelector.String() + } + indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc} + return v1.NewFilteredConfigMapInformer(e.clientset, e.namespace, defaultRBACSyncPeriod, indexers, tweakConfigMap) +} + +// RunPolicyLoader runs the policy loader which watches policy updates from the configmap and reloads them +func (e *Enforcer) RunPolicyLoader(ctx context.Context, onUpdated func(cm *apiv1.ConfigMap) error) error { + cm, err := e.clientset.CoreV1().ConfigMaps(e.namespace).Get(ctx, e.configmap, metav1.GetOptions{}) + if err != nil { + if !apierr.IsNotFound(err) { + return err + } + } else { + err = e.syncUpdate(cm, onUpdated) + if err != nil { + return err + } + } + e.runInformer(ctx, onUpdated) + return nil +} + +func (e *Enforcer) runInformer(ctx context.Context, onUpdated func(cm *apiv1.ConfigMap) error) { + cmInformer := e.newInformer() + _, err := cmInformer.AddEventHandler( + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if cm, ok := obj.(*apiv1.ConfigMap); ok { + err := e.syncUpdate(cm, onUpdated) + if err != nil { + log.Error(err) + } else { + log.Infof("RBAC ConfigMap '%s' added", e.configmap) + } + } 
+ }, + UpdateFunc: func(old, new interface{}) { + oldCM := old.(*apiv1.ConfigMap) + newCM := new.(*apiv1.ConfigMap) + if oldCM.ResourceVersion == newCM.ResourceVersion { + return + } + err := e.syncUpdate(newCM, onUpdated) + if err != nil { + log.Error(err) + } else { + log.Infof("RBAC ConfigMap '%s' updated", e.configmap) + } + }, + }, + ) + if err != nil { + log.Error(err) + } + log.Info("Starting rbac config informer") + cmInformer.Run(ctx.Done()) + log.Info("rbac configmap informer cancelled") +} + +// PolicyCSV will generate the final policy csv to be used +// by Argo CD RBAC. It will find entries in the given data +// that matches the policy key name convention: +// +// policy[.overlay].csv +func PolicyCSV(data map[string]string) string { + var strBuilder strings.Builder + // add the main policy first + if p, ok := data[ConfigMapPolicyCSVKey]; ok { + strBuilder.WriteString(p) + } + + keys := make([]string, 0, len(data)) + for k := range data { + keys = append(keys, k) + } + sort.Strings(keys) + + // append additional policies at the end of the csv + for _, key := range keys { + value := data[key] + if strings.HasPrefix(key, "policy.") && + strings.HasSuffix(key, ".csv") && + key != ConfigMapPolicyCSVKey { + strBuilder.WriteString("\n") + strBuilder.WriteString(value) + } + } + return strBuilder.String() +} + +// syncUpdate updates the enforcer +func (e *Enforcer) syncUpdate(cm *apiv1.ConfigMap, onUpdated func(cm *apiv1.ConfigMap) error) error { + e.SetDefaultRole(cm.Data[ConfigMapPolicyDefaultKey]) + e.SetMatchMode(cm.Data[ConfigMapMatchModeKey]) + policyCSV := PolicyCSV(cm.Data) + if err := onUpdated(cm); err != nil { + return err + } + return e.SetUserPolicy(policyCSV) +} + +// ValidatePolicy verifies a policy string is acceptable to casbin +func ValidatePolicy(policy string) error { + _, err := newEnforcerSafe(globMatchFunc, newBuiltInModel(), newAdapter("", "", policy)) + if err != nil { + return fmt.Errorf("policy syntax error: %s", policy) + } + return 
nil +} + +// newBuiltInModel is a helper to return a brand new casbin model from the built-in model string. +// This is needed because it is not safe to re-use the same casbin Model when instantiating new +// casbin enforcers. +func newBuiltInModel() model.Model { + m, err := model.NewModelFromString(assets.ModelConf) + if err != nil { + panic(err) + } + return m +} + +// Casbin adapter which satisfies persist.Adapter interface +type argocdAdapter struct { + builtinPolicy string + userDefinedPolicy string + runtimePolicy string +} + +func newAdapter(builtinPolicy, userDefinedPolicy, runtimePolicy string) *argocdAdapter { + return &argocdAdapter{ + builtinPolicy: builtinPolicy, + userDefinedPolicy: userDefinedPolicy, + runtimePolicy: runtimePolicy, + } +} + +func (a *argocdAdapter) LoadPolicy(model model.Model) error { + for _, policyStr := range []string{a.builtinPolicy, a.userDefinedPolicy, a.runtimePolicy} { + for _, line := range strings.Split(policyStr, "\n") { + if err := loadPolicyLine(strings.TrimSpace(line), model); err != nil { + return err + } + } + } + return nil +} + +// The modified version of LoadPolicyLine function defined in "persist" package of github.com/casbin/casbin. +// Uses CVS parser to correctly handle quotes in policy line. 
+func loadPolicyLine(line string, model model.Model) error { + if line == "" || strings.HasPrefix(line, "#") { + return nil + } + + reader := csv.NewReader(strings.NewReader(line)) + reader.TrimLeadingSpace = true + tokens, err := reader.Read() + if err != nil { + return err + } + + tokenLen := len(tokens) + + if tokenLen < 1 || + tokens[0] == "" || + (tokens[0] == "g" && tokenLen != 3) || + (tokens[0] == "p" && tokenLen != 6) { + return fmt.Errorf("invalid RBAC policy: %s", line) + } + + key := tokens[0] + sec := key[:1] + if _, ok := model[sec]; !ok { + return fmt.Errorf("invalid RBAC policy: %s", line) + } + if _, ok := model[sec][key]; !ok { + return fmt.Errorf("invalid RBAC policy: %s", line) + } + model[sec][key].Policy = append(model[sec][key].Policy, tokens[1:]) + return nil +} + +func (a *argocdAdapter) SavePolicy(model model.Model) error { + return errors.New("not implemented") +} + +func (a *argocdAdapter) AddPolicy(sec string, ptype string, rule []string) error { + return errors.New("not implemented") +} + +func (a *argocdAdapter) RemovePolicy(sec string, ptype string, rule []string) error { + return errors.New("not implemented") +} + +func (a *argocdAdapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { + return errors.New("not implemented") +} diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff.go b/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff.go index 2278222c3..52653771e 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff.go @@ -7,6 +7,7 @@ package diff import ( "bytes" "context" + "encoding/base64" "encoding/json" "errors" "fmt" @@ -35,6 +36,7 @@ import ( const ( couldNotMarshalErrMsg = "Could not unmarshal to object of type %s: %v" AnnotationLastAppliedConfig = "kubectl.kubernetes.io/last-applied-configuration" + replacement = "++++++++" ) // Holds diffing result of two resources @@ -194,12 
+196,12 @@ func serverSideDiff(config, live *unstructured.Unstructured, opts ...Option) (*D return buildDiffResult(predictedLiveBytes, liveBytes), nil } -// removeWebhookMutation will compare the predictedLive with live to identify -// changes done by mutation webhooks. Webhook mutations are identified by finding -// changes in predictedLive fields not associated with any manager in the -// managedFields. All fields under this condition will be reverted with their state -// from live. If the given predictedLive does not have the managedFields, an error -// will be returned. +// removeWebhookMutation will compare the predictedLive with live to identify changes done by mutation webhooks. +// Webhook mutations are removed from predictedLive by removing all fields which are not managed by the given 'manager'. +// At this step, we will only have the fields that are managed by the given 'manager'. +// It is then merged with the live state and re-assigned to predictedLive. This means that any +// fields not managed by the specified manager will be reverted with their state from live, including any webhook mutations. +// If the given predictedLive does not have the managedFields, an error will be returned. 
func removeWebhookMutation(predictedLive, live *unstructured.Unstructured, gvkParser *managedfields.GvkParser, manager string) (*unstructured.Unstructured, error) { plManagedFields := predictedLive.GetManagedFields() if len(plManagedFields) == 0 { @@ -221,57 +223,42 @@ func removeWebhookMutation(predictedLive, live *unstructured.Unstructured, gvkPa return nil, fmt.Errorf("error converting live state from unstructured to %s: %w", gvk, err) } - // Compare the predicted live with the live resource - comparison, err := typedLive.Compare(typedPredictedLive) - if err != nil { - return nil, fmt.Errorf("error comparing predicted resource to live resource: %w", err) - } + // Initialize an empty fieldpath.Set to aggregate managed fields for the specified manager + managerFieldsSet := &fieldpath.Set{} - // Loop over all existing managers in predicted live resource to identify - // fields mutated (in predicted live) not owned by any manager. + // Iterate over all ManagedFields entries in predictedLive for _, mfEntry := range plManagedFields { - mfs := &fieldpath.Set{} - err := mfs.FromJSON(bytes.NewReader(mfEntry.FieldsV1.Raw)) + managedFieldsSet := &fieldpath.Set{} + err := managedFieldsSet.FromJSON(bytes.NewReader(mfEntry.FieldsV1.Raw)) if err != nil { return nil, fmt.Errorf("error building managedFields set: %s", err) } - if comparison.Added != nil && !comparison.Added.Empty() { - // exclude the added fields owned by this manager from the comparison - comparison.Added = comparison.Added.Difference(mfs) - } - if comparison.Modified != nil && !comparison.Modified.Empty() { - // exclude the modified fields owned by this manager from the comparison - comparison.Modified = comparison.Modified.Difference(mfs) - } - if comparison.Removed != nil && !comparison.Removed.Empty() { - // exclude the removed fields owned by this manager from the comparison - comparison.Removed = comparison.Removed.Difference(mfs) + if mfEntry.Manager == manager { + // Union the fields with the aggregated 
set + managerFieldsSet = managerFieldsSet.Union(managedFieldsSet) } } - // At this point, comparison holds all mutations that aren't owned by any - // of the existing managers. - if comparison.Added != nil && !comparison.Added.Empty() { - // remove added fields that aren't owned by any manager - typedPredictedLive = typedPredictedLive.RemoveItems(comparison.Added) + if managerFieldsSet.Empty() { + return nil, fmt.Errorf("no managed fields found for manager: %s", manager) } - if comparison.Modified != nil && !comparison.Modified.Empty() { - liveModValues := typedLive.ExtractItems(comparison.Modified) - // revert modified fields not owned by any manager - typedPredictedLive, err = typedPredictedLive.Merge(liveModValues) - if err != nil { - return nil, fmt.Errorf("error reverting webhook modified fields in predicted live resource: %s", err) - } + predictedLiveFieldSet, err := typedPredictedLive.ToFieldSet() + if err != nil { + return nil, fmt.Errorf("error converting predicted live state to FieldSet: %w", err) } - if comparison.Removed != nil && !comparison.Removed.Empty() { - liveRmValues := typedLive.ExtractItems(comparison.Removed) - // revert removed fields not owned by any manager - typedPredictedLive, err = typedPredictedLive.Merge(liveRmValues) - if err != nil { - return nil, fmt.Errorf("error reverting webhook removed fields in predicted live resource: %s", err) - } + // Remove fields from predicted live that are not managed by the provided manager + nonArgoFieldsSet := predictedLiveFieldSet.Difference(managerFieldsSet) + + // In case any of the removed fields cause schema violations, we will keep those fields + nonArgoFieldsSet = safelyRemoveFieldsSet(typedPredictedLive, nonArgoFieldsSet) + typedPredictedLive = typedPredictedLive.RemoveItems(nonArgoFieldsSet) + + // Apply the predicted live state to the live state to get a diff without mutation webhook fields + typedPredictedLive, err = typedLive.Merge(typedPredictedLive) + if err != nil { + return nil, 
fmt.Errorf("error applying predicted live to live state: %w", err) } plu := typedPredictedLive.AsValue().Unstructured() @@ -282,6 +269,31 @@ func removeWebhookMutation(predictedLive, live *unstructured.Unstructured, gvkPa return &unstructured.Unstructured{Object: pl}, nil } +// safelyRemoveFieldSet will validate if removing the fieldsToRemove set from predictedLive maintains +// a valid schema. If removing a field in fieldsToRemove is invalid and breaks the schema, it is not safe +// to remove and will be skipped from removal from predictedLive. +func safelyRemoveFieldsSet(predictedLive *typed.TypedValue, fieldsToRemove *fieldpath.Set) *fieldpath.Set { + // In some cases, we cannot remove fields due to violation of the predicted live schema. In such cases we validate the removal + // of each field and only include it if the removal is valid. + testPredictedLive := predictedLive.RemoveItems(fieldsToRemove) + err := testPredictedLive.Validate() + if err != nil { + adjustedFieldsToRemove := fieldpath.NewSet() + fieldsToRemove.Iterate(func(p fieldpath.Path) { + singleFieldSet := fieldpath.NewSet(p) + testSingleRemoval := predictedLive.RemoveItems(singleFieldSet) + // Check if removing this single field maintains a valid schema + if testSingleRemoval.Validate() == nil { + // If valid, add this field to the adjusted set to remove + adjustedFieldsToRemove.Insert(p) + } + }) + return adjustedFieldsToRemove + } + // If no violations, return the original set to remove + return fieldsToRemove +} + func jsonStrToUnstructured(jsonString string) (*unstructured.Unstructured, error) { res := make(map[string]interface{}) err := json.Unmarshal([]byte(jsonString), &res) @@ -664,31 +676,9 @@ func ThreeWayDiff(orig, config, live *unstructured.Unstructured) (*DiffResult, e } } - predictedLive := &unstructured.Unstructured{} - err = json.Unmarshal(predictedLiveBytes, predictedLive) - if err != nil { - return nil, err - } - return buildDiffResult(predictedLiveBytes, liveBytes), nil } -// 
stripTypeInformation strips any type information (e.g. float64 vs. int) from the unstructured -// object by remarshalling the object. This is important for diffing since it will cause godiff -// to report a false difference. -func stripTypeInformation(un *unstructured.Unstructured) *unstructured.Unstructured { - unBytes, err := json.Marshal(un) - if err != nil { - panic(err) - } - var newUn unstructured.Unstructured - err = json.Unmarshal(unBytes, &newUn) - if err != nil { - panic(err) - } - return &newUn -} - // removeNamespaceAnnotation remove the namespace and an empty annotation map from the metadata. // The namespace field is present in live (namespaced) objects, but not necessarily present in // config or last-applied. This results in a diff which we don't care about. We delete the two so @@ -864,6 +854,32 @@ func NormalizeSecret(un *unstructured.Unstructured, opts ...Option) { if gvk.Group != "" || gvk.Kind != "Secret" { return } + + // move stringData to data section + if stringData, found, err := unstructured.NestedMap(un.Object, "stringData"); found && err == nil { + var data map[string]interface{} + data, found, _ = unstructured.NestedMap(un.Object, "data") + if !found { + data = make(map[string]interface{}) + } + + // base64 encode string values and add non-string values as is. + // This ensures that the apply fails if the secret is invalid. 
+ for k, v := range stringData { + strVal, ok := v.(string) + if ok { + data[k] = base64.StdEncoding.EncodeToString([]byte(strVal)) + } else { + data[k] = v + } + } + + err := unstructured.SetNestedField(un.Object, data, "data") + if err == nil { + delete(un.Object, "stringData") + } + } + o := applyOptions(opts) var secret corev1.Secret err := runtime.DefaultUnstructuredConverter.FromUnstructured(un.Object, &secret) @@ -877,15 +893,6 @@ func NormalizeSecret(un *unstructured.Unstructured, opts ...Option) { secret.Data[k] = []byte("") } } - if len(secret.StringData) > 0 { - if secret.Data == nil { - secret.Data = make(map[string][]byte) - } - for k, v := range secret.StringData { - secret.Data[k] = []byte(v) - } - delete(un.Object, "stringData") - } newObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&secret) if err != nil { o.log.Error(err, "object unable to convert from secret") @@ -991,13 +998,13 @@ func CreateTwoWayMergePatch(orig, new, dataStruct interface{}) ([]byte, bool, er return patch, string(patch) != "{}", nil } -// HideSecretData replaces secret data values in specified target, live secrets and in last applied configuration of live secret with stars. Also preserves differences between -// target, live and last applied config values. E.g. if all three are equal the values would be replaced with same number of stars. If all the are different then number of stars +// HideSecretData replaces secret data & optional annotations values in specified target, live secrets and in last applied configuration of live secret with plus(+). Also preserves differences between +// target, live and last applied config values. E.g. if all three are equal the values would be replaced with same number of plus(+). If all are different then number of plus(+) // in replacement should be different. 
-func HideSecretData(target *unstructured.Unstructured, live *unstructured.Unstructured) (*unstructured.Unstructured, *unstructured.Unstructured, error) { - var orig *unstructured.Unstructured +func HideSecretData(target *unstructured.Unstructured, live *unstructured.Unstructured, hideAnnotations map[string]bool) (*unstructured.Unstructured, *unstructured.Unstructured, error) { + var liveLastAppliedAnnotation *unstructured.Unstructured if live != nil { - orig, _ = GetLastAppliedConfigAnnotation(live) + liveLastAppliedAnnotation, _ = GetLastAppliedConfigAnnotation(live) live = live.DeepCopy() } if target != nil { @@ -1005,7 +1012,7 @@ func HideSecretData(target *unstructured.Unstructured, live *unstructured.Unstru } keys := map[string]bool{} - for _, obj := range []*unstructured.Unstructured{target, live, orig} { + for _, obj := range []*unstructured.Unstructured{target, live, liveLastAppliedAnnotation} { if obj == nil { continue } @@ -1017,25 +1024,57 @@ func HideSecretData(target *unstructured.Unstructured, live *unstructured.Unstru } } + var err error + target, live, liveLastAppliedAnnotation, err = hide(target, live, liveLastAppliedAnnotation, keys, "data") + if err != nil { + return nil, nil, err + } + + target, live, liveLastAppliedAnnotation, err = hide(target, live, liveLastAppliedAnnotation, hideAnnotations, "metadata", "annotations") + if err != nil { + return nil, nil, err + } + + if live != nil && liveLastAppliedAnnotation != nil { + annotations := live.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + // special case: hide "kubectl.kubernetes.io/last-applied-configuration" annotation + if _, ok := hideAnnotations[corev1.LastAppliedConfigAnnotation]; ok { + annotations[corev1.LastAppliedConfigAnnotation] = replacement + } else { + lastAppliedData, err := json.Marshal(liveLastAppliedAnnotation) + if err != nil { + return nil, nil, fmt.Errorf("error marshaling json: %s", err) + } + 
annotations[corev1.LastAppliedConfigAnnotation] = string(lastAppliedData) + } + live.SetAnnotations(annotations) + } + return target, live, nil +} + +func hide(target, live, liveLastAppliedAnnotation *unstructured.Unstructured, keys map[string]bool, fields ...string) (*unstructured.Unstructured, *unstructured.Unstructured, *unstructured.Unstructured, error) { for k := range keys { // we use "+" rather than the more common "*" - nextReplacement := "++++++++" + nextReplacement := replacement valToReplacement := make(map[string]string) - for _, obj := range []*unstructured.Unstructured{target, live, orig} { + for _, obj := range []*unstructured.Unstructured{target, live, liveLastAppliedAnnotation} { var data map[string]interface{} if obj != nil { // handles an edge case when secret data has nil value // https://github.com/argoproj/argo-cd/issues/5584 - dataValue, ok := obj.Object["data"] + dataValue, ok, _ := unstructured.NestedFieldCopy(obj.Object, fields...) if ok { if dataValue == nil { continue } } var err error - data, _, err = unstructured.NestedMap(obj.Object, "data") + data, _, err = unstructured.NestedMap(obj.Object, fields...) if err != nil { - return nil, nil, fmt.Errorf("unstructured.NestedMap error: %s", err) + return nil, nil, nil, fmt.Errorf("unstructured.NestedMap error: %s", err) } } if data == nil { @@ -1053,25 +1092,13 @@ func HideSecretData(target *unstructured.Unstructured, live *unstructured.Unstru valToReplacement[val] = replacement } data[k] = replacement - err := unstructured.SetNestedField(obj.Object, data, "data") + err := unstructured.SetNestedField(obj.Object, data, fields...) 
if err != nil { - return nil, nil, fmt.Errorf("unstructured.SetNestedField error: %s", err) + return nil, nil, nil, fmt.Errorf("unstructured.SetNestedField error: %s", err) } } } - if live != nil && orig != nil { - annotations := live.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - lastAppliedData, err := json.Marshal(orig) - if err != nil { - return nil, nil, fmt.Errorf("error marshaling json: %s", err) - } - annotations[corev1.LastAppliedConfigAnnotation] = string(lastAppliedData) - live.SetAnnotations(annotations) - } - return target, live, nil + return target, live, liveLastAppliedAnnotation, nil } func toString(val interface{}) string { @@ -1087,11 +1114,20 @@ func toString(val interface{}) string { // Remarshalling also strips any type information (e.g. float64 vs. int) from the unstructured // object. This is important for diffing since it will cause godiff to report a false difference. func remarshal(obj *unstructured.Unstructured, o options) *unstructured.Unstructured { - obj = stripTypeInformation(obj) data, err := json.Marshal(obj) if err != nil { panic(err) } + + // Unmarshal again to strip type information (e.g. float64 vs. int) from the unstructured + // object. This is important for diffing since it will cause godiff to report a false difference. 
+ var newUn unstructured.Unstructured + err = json.Unmarshal(data, &newUn) + if err != nil { + panic(err) + } + obj = &newUn + gvk := obj.GroupVersionKind() item, err := scheme.Scheme.New(obj.GroupVersionKind()) if err != nil { diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff_options.go b/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff_options.go index 069b2a0ed..b2d69bec3 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff_options.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/diff/diff_options.go @@ -40,7 +40,7 @@ func applyOptions(opts []Option) options { } type KubeApplier interface { - ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string, serverSideDiff bool) (string, error) + ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string) (string, error) } // ServerSideDryRunner defines the contract to run a server-side apply in @@ -66,7 +66,7 @@ func NewK8sServerSideDryRunner(kubeApplier KubeApplier) *K8sServerSideDryRunner // obj and the given manager in dryrun mode. Will return the predicted live state // json as string. 
func (kdr *K8sServerSideDryRunner) Run(ctx context.Context, obj *unstructured.Unstructured, manager string) (string, error) { - return kdr.dryrunApplier.ApplyResource(ctx, obj, cmdutil.DryRunServer, false, false, true, manager, true) + return kdr.dryrunApplier.ApplyResource(ctx, obj, cmdutil.DryRunServer, false, false, true, manager) } func IgnoreAggregatedRoles(ignore bool) Option { diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/sync/common/types.go b/vendor/github.com/argoproj/gitops-engine/pkg/sync/common/types.go index 7399cc78a..b02ad8c20 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/sync/common/types.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/sync/common/types.go @@ -16,6 +16,7 @@ const ( AnnotationKeyHook = "argocd.argoproj.io/hook" // AnnotationKeyHookDeletePolicy is the policy of deleting a hook AnnotationKeyHookDeletePolicy = "argocd.argoproj.io/hook-delete-policy" + AnnotationDeletionApproved = "argocd.argoproj.io/deletion-approved" // Sync option that disables dry run in resource is missing in the cluster SyncOptionSkipDryRunOnMissingResource = "SkipDryRunOnMissingResource=true" @@ -31,10 +32,16 @@ const ( SyncOptionForce = "Force=true" // Sync option that enables use of --server-side flag instead of client-side SyncOptionServerSideApply = "ServerSideApply=true" + // Sync option that disables use of --server-side flag instead of client-side + SyncOptionDisableServerSideApply = "ServerSideApply=false" // Sync option that disables resource deletion SyncOptionDisableDeletion = "Delete=false" // Sync option that sync only out of sync resources SyncOptionApplyOutOfSyncOnly = "ApplyOutOfSyncOnly=true" + // Sync option that requires confirmation before deleting the resource + SyncOptionDeleteRequireConfirm = "Delete=confirm" + // Sync option that requires confirmation before deleting the resource + SyncOptionPruneRequireConfirm = "Prune=confirm" ) type PermissionValidator func(un *unstructured.Unstructured, res 
*metav1.APIResource) error diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/ctl.go b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/ctl.go index 3f494b389..a80c63ea5 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/ctl.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/ctl.go @@ -19,6 +19,7 @@ import ( "k8s.io/kube-openapi/pkg/util/proto" "k8s.io/kubectl/pkg/util/openapi" + "github.com/argoproj/gitops-engine/pkg/diff" utils "github.com/argoproj/gitops-engine/pkg/utils/io" "github.com/argoproj/gitops-engine/pkg/utils/tracing" ) @@ -296,6 +297,31 @@ func (k *KubectlCmd) ManageResources(config *rest.Config, openAPISchema openapi. }, cleanup, nil } +func ManageServerSideDiffDryRuns(config *rest.Config, openAPISchema openapi.Resources, tracer tracing.Tracer, log logr.Logger, onKubectlRun OnKubectlRunFunc) (diff.KubeApplier, func(), error) { + f, err := os.CreateTemp(utils.TempDir, "") + if err != nil { + return nil, nil, fmt.Errorf("failed to generate temp file for kubeconfig: %v", err) + } + _ = f.Close() + err = WriteKubeConfig(config, "", f.Name()) + if err != nil { + utils.DeleteFile(f.Name()) + return nil, nil, fmt.Errorf("failed to write kubeconfig: %v", err) + } + fact := kubeCmdFactory(f.Name(), "", config) + cleanup := func() { + utils.DeleteFile(f.Name()) + } + return &kubectlServerSideDiffDryRunApplier{ + config: config, + fact: fact, + openAPISchema: openAPISchema, + tracer: tracer, + log: log, + onKubectlRun: onKubectlRun, + }, cleanup, nil +} + // ConvertToVersion converts an unstructured object into the specified group/version func (k *KubectlCmd) ConvertToVersion(obj *unstructured.Unstructured, group string, version string) (*unstructured.Unstructured, error) { span := k.Tracer.StartSpan("ConvertToVersion") diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/kube.go b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/kube.go index f88ed172b..9db109062 100644 --- 
a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/kube.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/kube.go @@ -205,12 +205,15 @@ var ( // See ApplyOpts::Run() // cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patchBytes, info), info.Source, err) kubectlApplyPatchErrOutRegexp = regexp.MustCompile(`(?s)^error when applying patch:.*\nfor: "\S+": `) + + kubectlErrOutMapRegexp = regexp.MustCompile(`map\[.*\]`) ) // cleanKubectlOutput makes the error output of kubectl a little better to read func cleanKubectlOutput(s string) string { s = strings.TrimSpace(s) s = kubectlErrOutRegexp.ReplaceAllString(s, "") + s = kubectlErrOutMapRegexp.ReplaceAllString(s, "") s = kubectlApplyPatchErrOutRegexp.ReplaceAllString(s, "") s = strings.Replace(s, "; if you choose to ignore these errors, turn validation off with --validate=false", "", -1) return s diff --git a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/resource_ops.go b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/resource_ops.go index 00d8a05fa..8f5f29665 100644 --- a/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/resource_ops.go +++ b/vendor/github.com/argoproj/gitops-engine/pkg/utils/kube/resource_ops.go @@ -39,12 +39,13 @@ import ( // ResourceOperations provides methods to manage k8s resources type ResourceOperations interface { - ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string, serverSideDiff bool) (string, error) + ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string) (string, error) ReplaceResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force bool) (string, error) CreateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, 
validate bool) (string, error) UpdateResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy) (*unstructured.Unstructured, error) } +// This is a generic implementation for doing most kubectl operations. Implements the ResourceOperations interface. type kubectlResourceOperations struct { config *rest.Config log logr.Logger @@ -54,49 +55,72 @@ type kubectlResourceOperations struct { openAPISchema openapi.Resources } -type commandExecutor func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error +// This is an implementation specific for doing server-side diff dry runs. Implements the KubeApplier interface. +type kubectlServerSideDiffDryRunApplier struct { + config *rest.Config + log logr.Logger + tracer tracing.Tracer + onKubectlRun OnKubectlRunFunc + fact cmdutil.Factory + openAPISchema openapi.Resources +} -func (k *kubectlResourceOperations) runResourceCommand(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, serverSideDiff bool, executor commandExecutor) (string, error) { - manifestBytes, err := json.Marshal(obj) - if err != nil { - return "", err - } - manifestFile, err := os.CreateTemp(io.TempDir, "") - if err != nil { - return "", fmt.Errorf("Failed to generate temp file for manifest: %v", err) - } - defer io.DeleteFile(manifestFile.Name()) - if _, err = manifestFile.Write(manifestBytes); err != nil { - return "", fmt.Errorf("Failed to write manifest: %v", err) - } - if err = manifestFile.Close(); err != nil { - return "", fmt.Errorf("Failed to close manifest: %v", err) - } +type commandExecutor func(ioStreams genericclioptions.IOStreams, fileName string) error +func maybeLogManifest(manifestBytes []byte, log logr.Logger) error { // log manifest - if k.log.V(1).Enabled() { + if log.V(1).Enabled() { var obj unstructured.Unstructured err := json.Unmarshal(manifestBytes, &obj) if err != nil { - return "", err + return err } - redacted, _, err := 
diff.HideSecretData(&obj, nil) + redacted, _, err := diff.HideSecretData(&obj, nil, nil) if err != nil { - return "", err + return err } redactedBytes, err := json.Marshal(redacted) if err != nil { - return "", err + return err } - k.log.V(1).Info(string(redactedBytes)) + log.V(1).Info(string(redactedBytes)) } + return nil +} + +func createManifestFile(obj *unstructured.Unstructured, log logr.Logger) (*os.File, error) { + manifestBytes, err := json.Marshal(obj) + if err != nil { + return nil, err + } + manifestFile, err := os.CreateTemp(io.TempDir, "") + if err != nil { + return nil, fmt.Errorf("Failed to generate temp file for manifest: %v", err) + } + if _, err = manifestFile.Write(manifestBytes); err != nil { + return nil, fmt.Errorf("Failed to write manifest: %v", err) + } + if err = manifestFile.Close(); err != nil { + return nil, fmt.Errorf("Failed to close manifest: %v", err) + } + + err = maybeLogManifest(manifestBytes, log) + if err != nil { + return nil, err + } + return manifestFile, nil +} + +func (k *kubectlResourceOperations) runResourceCommand(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, executor commandExecutor) (string, error) { + manifestFile, err := createManifestFile(obj, k.log) + if err != nil { + return "", err + } + defer io.DeleteFile(manifestFile.Name()) var out []string // rbac resouces are first applied with auth reconcile kubectl feature. - // serverSideDiff should avoid this step as the resources are not being actually - // applied but just running in dryrun mode. Also, kubectl auth reconcile doesn't - // currently support running dryrun in server mode. 
- if obj.GetAPIVersion() == "rbac.authorization.k8s.io/v1" && !serverSideDiff { + if obj.GetAPIVersion() == "rbac.authorization.k8s.io/v1" { outReconcile, err := k.rbacReconcile(ctx, obj, manifestFile.Name(), dryRunStrategy) if err != nil { return "", fmt.Errorf("error running rbacReconcile: %s", err) @@ -112,7 +136,7 @@ func (k *kubectlResourceOperations) runResourceCommand(ctx context.Context, obj Out: &bytes.Buffer{}, ErrOut: &bytes.Buffer{}, } - err = executor(k.fact, ioStreams, manifestFile.Name()) + err = executor(ioStreams, manifestFile.Name()) if err != nil { return "", errors.New(cleanKubectlOutput(err.Error())) } @@ -125,6 +149,40 @@ func (k *kubectlResourceOperations) runResourceCommand(ctx context.Context, obj return strings.Join(out, ". "), nil } +func (k *kubectlServerSideDiffDryRunApplier) runResourceCommand(obj *unstructured.Unstructured, executor commandExecutor) (string, error) { + manifestFile, err := createManifestFile(obj, k.log) + if err != nil { + return "", err + } + defer io.DeleteFile(manifestFile.Name()) + + stdoutBuf := &bytes.Buffer{} + stderrBuf := &bytes.Buffer{} + + // Run kubectl apply + ioStreams := genericclioptions.IOStreams{ + In: &bytes.Buffer{}, + Out: stdoutBuf, + ErrOut: stderrBuf, + } + err = executor(ioStreams, manifestFile.Name()) + if err != nil { + return "", errors.New(cleanKubectlOutput(err.Error())) + } + stdout := stdoutBuf.String() + stderr := stderrBuf.String() + + if stderr != "" && stdout == "" { + err := fmt.Errorf("Server-side dry run apply had non-empty stderr: %s", stderr) + k.log.Error(err, "server-side diff") + return "", err + } + if stderr != "" { + k.log.Info("Warning: Server-side dry run apply had non-empty stderr", "stderr", stderr) + } + return stdout, nil +} + // rbacReconcile will perform reconciliation for RBAC resources.
It will run // the following command: // @@ -135,7 +193,7 @@ func (k *kubectlResourceOperations) runResourceCommand(ctx context.Context, obj // See: https://github.com/kubernetes/kubernetes/issues/66353 // `auth reconcile` will delete and recreate the resource if necessary func (k *kubectlResourceOperations) rbacReconcile(ctx context.Context, obj *unstructured.Unstructured, fileName string, dryRunStrategy cmdutil.DryRunStrategy) (string, error) { - cleanup, err := k.processKubectlRun("auth") + cleanup, err := processKubectlRun(k.onKubectlRun, "auth") if err != nil { return "", fmt.Errorf("error processing kubectl run auth: %w", err) } @@ -168,18 +226,18 @@ func (k *kubectlResourceOperations) ReplaceResource(ctx context.Context, obj *un span.SetBaggageItem("name", obj.GetName()) defer span.Finish() k.log.Info(fmt.Sprintf("Replacing resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace())) - return k.runResourceCommand(ctx, obj, dryRunStrategy, false, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { - cleanup, err := k.processKubectlRun("replace") + return k.runResourceCommand(ctx, obj, dryRunStrategy, func(ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := processKubectlRun(k.onKubectlRun, "replace") if err != nil { return err } defer cleanup() - replaceOptions, err := k.newReplaceOptions(k.config, f, ioStreams, fileName, obj.GetNamespace(), force, dryRunStrategy) + replaceOptions, err := k.newReplaceOptions(k.config, k.fact, ioStreams, fileName, obj.GetNamespace(), force, dryRunStrategy) if err != nil { return err } - return replaceOptions.Run(f) + return replaceOptions.Run(k.fact) }) } @@ -189,8 +247,8 @@ func (k *kubectlResourceOperations) CreateResource(ctx context.Context, obj *uns span.SetBaggageItem("kind", gvk.Kind) span.SetBaggageItem("name", obj.GetName()) defer span.Finish() - return k.runResourceCommand(ctx, obj, dryRunStrategy, 
false, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { - cleanup, err := k.processKubectlRun("create") + return k.runResourceCommand(ctx, obj, dryRunStrategy, func(ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := processKubectlRun(k.onKubectlRun, "create") if err != nil { return err } @@ -209,7 +267,7 @@ func (k *kubectlResourceOperations) CreateResource(ctx context.Context, obj *uns _ = command.Flags().Set("validate", "true") } - return createOptions.RunCreate(f, command) + return createOptions.RunCreate(k.fact, command) }) } @@ -243,25 +301,55 @@ func (k *kubectlResourceOperations) UpdateResource(ctx context.Context, obj *uns } // ApplyResource performs an apply of a unstructured resource -func (k *kubectlResourceOperations) ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string, serverSideDiff bool) (string, error) { +func (k *kubectlServerSideDiffDryRunApplier) ApplyResource(_ context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string) (string, error) { + span := k.tracer.StartSpan("ApplyResource") + span.SetBaggageItem("kind", obj.GetKind()) + span.SetBaggageItem("name", obj.GetName()) + defer span.Finish() + k.log.V(1).WithValues( + "dry-run", [...]string{"none", "client", "server"}[dryRunStrategy], + "manager", manager, + "serverSideApply", serverSideApply).Info(fmt.Sprintf("Running server-side diff. 
Dry run applying resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace())) + + return k.runResourceCommand(obj, func(ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := processKubectlRun(k.onKubectlRun, "apply") + if err != nil { + return err + } + defer cleanup() + + applyOpts, err := k.newApplyOptions(ioStreams, obj, fileName, validate, force, serverSideApply, dryRunStrategy, manager) + if err != nil { + return err + } + return applyOpts.Run() + }) +} + +// ApplyResource performs an apply of a unstructured resource +func (k *kubectlResourceOperations) ApplyResource(ctx context.Context, obj *unstructured.Unstructured, dryRunStrategy cmdutil.DryRunStrategy, force, validate, serverSideApply bool, manager string) (string, error) { span := k.tracer.StartSpan("ApplyResource") span.SetBaggageItem("kind", obj.GetKind()) span.SetBaggageItem("name", obj.GetName()) defer span.Finish() - k.log.WithValues( + logWithLevel := k.log + if dryRunStrategy != cmdutil.DryRunNone { + logWithLevel = logWithLevel.V(1) + } + logWithLevel.WithValues( "dry-run", [...]string{"none", "client", "server"}[dryRunStrategy], "manager", manager, "serverSideApply", serverSideApply, - "serverSideDiff", serverSideDiff).Info(fmt.Sprintf("Applying resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace())) + "serverSideDiff", true).Info(fmt.Sprintf("Applying resource %s/%s in cluster: %s, namespace: %s", obj.GetKind(), obj.GetName(), k.config.Host, obj.GetNamespace())) - return k.runResourceCommand(ctx, obj, dryRunStrategy, serverSideDiff, func(f cmdutil.Factory, ioStreams genericclioptions.IOStreams, fileName string) error { - cleanup, err := k.processKubectlRun("apply") + return k.runResourceCommand(ctx, obj, dryRunStrategy, func(ioStreams genericclioptions.IOStreams, fileName string) error { + cleanup, err := processKubectlRun(k.onKubectlRun, "apply") if err != 
nil { return err } defer cleanup() - applyOpts, err := k.newApplyOptions(ioStreams, obj, fileName, validate, force, serverSideApply, dryRunStrategy, manager, serverSideDiff) + applyOpts, err := k.newApplyOptions(ioStreams, obj, fileName, validate, force, serverSideApply, dryRunStrategy, manager) if err != nil { return err } @@ -269,7 +357,7 @@ func (k *kubectlResourceOperations) ApplyResource(ctx context.Context, obj *unst }) } -func (k *kubectlResourceOperations) newApplyOptions(ioStreams genericclioptions.IOStreams, obj *unstructured.Unstructured, fileName string, validate bool, force, serverSideApply bool, dryRunStrategy cmdutil.DryRunStrategy, manager string, serverSideDiff bool) (*apply.ApplyOptions, error) { +func newApplyOptionsCommon(config *rest.Config, fact cmdutil.Factory, ioStreams genericclioptions.IOStreams, obj *unstructured.Unstructured, fileName string, validate bool, force, serverSideApply bool, dryRunStrategy cmdutil.DryRunStrategy, manager string) (*apply.ApplyOptions, error) { flags := apply.NewApplyFlags(ioStreams) o := &apply.ApplyOptions{ IOStreams: ioStreams, @@ -281,7 +369,7 @@ func (k *kubectlResourceOperations) newApplyOptions(ioStreams genericclioptions. OpenAPIPatch: true, ServerSideApply: serverSideApply, } - dynamicClient, err := dynamic.NewForConfig(k.config) + dynamicClient, err := dynamic.NewForConfig(config) if err != nil { return nil, err } @@ -290,19 +378,60 @@ func (k *kubectlResourceOperations) newApplyOptions(ioStreams genericclioptions. 
if err != nil { return nil, err } - o.OpenAPIGetter = k.fact + o.OpenAPIGetter = fact o.DryRunStrategy = dryRunStrategy o.FieldManager = manager validateDirective := metav1.FieldValidationIgnore if validate { validateDirective = metav1.FieldValidationStrict } - o.Validator, err = k.fact.Validator(validateDirective) + o.Validator, err = fact.Validator(validateDirective) if err != nil { return nil, err } - o.Builder = k.fact.NewBuilder() - o.Mapper, err = k.fact.ToRESTMapper() + o.Builder = fact.NewBuilder() + o.Mapper, err = fact.ToRESTMapper() + if err != nil { + return nil, err + } + + o.DeleteOptions.FilenameOptions.Filenames = []string{fileName} + o.Namespace = obj.GetNamespace() + o.DeleteOptions.ForceDeletion = force + o.DryRunStrategy = dryRunStrategy + if manager != "" { + o.FieldManager = manager + } + return o, nil +} + +func (k *kubectlServerSideDiffDryRunApplier) newApplyOptions(ioStreams genericclioptions.IOStreams, obj *unstructured.Unstructured, fileName string, validate bool, force, serverSideApply bool, dryRunStrategy cmdutil.DryRunStrategy, manager string) (*apply.ApplyOptions, error) { + o, err := newApplyOptionsCommon(k.config, k.fact, ioStreams, obj, fileName, validate, force, serverSideApply, dryRunStrategy, manager) + if err != nil { + return nil, err + } + + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { + o.PrintFlags.NamePrintFlags.Operation = operation + if o.DryRunStrategy != cmdutil.DryRunServer { + return nil, fmt.Errorf("invalid dry run strategy passed to server-side diff dry run applier: %d, expected %d", o.DryRunStrategy, cmdutil.DryRunServer) + } + // managedFields are required by server-side diff to identify + // changes made by mutation webhooks. 
+ o.PrintFlags.JSONYamlPrintFlags.ShowManagedFields = true + p, err := o.PrintFlags.JSONYamlPrintFlags.ToPrinter("json") + if err != nil { + return nil, fmt.Errorf("error configuring server-side diff printer: %w", err) + } + return p, nil + } + + o.ForceConflicts = true + return o, nil +} + +func (k *kubectlResourceOperations) newApplyOptions(ioStreams genericclioptions.IOStreams, obj *unstructured.Unstructured, fileName string, validate bool, force, serverSideApply bool, dryRunStrategy cmdutil.DryRunStrategy, manager string) (*apply.ApplyOptions, error) { + o, err := newApplyOptionsCommon(k.config, k.fact, ioStreams, obj, fileName, validate, force, serverSideApply, dryRunStrategy, manager) if err != nil { return nil, err } @@ -316,32 +445,15 @@ func (k *kubectlResourceOperations) newApplyOptions(ioStreams genericclioptions. return nil, err } case cmdutil.DryRunServer: - if serverSideDiff { - // managedFields are required by server-side diff to identify - // changes made by mutation webhooks. 
- o.PrintFlags.JSONYamlPrintFlags.ShowManagedFields = true - p, err := o.PrintFlags.JSONYamlPrintFlags.ToPrinter("json") - if err != nil { - return nil, fmt.Errorf("error configuring server-side diff printer: %w", err) - } - return p, nil - } else { - err = o.PrintFlags.Complete("%s (server dry run)") - if err != nil { - return nil, fmt.Errorf("error configuring server dryrun printer: %w", err) - } + err = o.PrintFlags.Complete("%s (server dry run)") + if err != nil { + return nil, fmt.Errorf("error configuring server dryrun printer: %w", err) } } return o.PrintFlags.ToPrinter() } - o.DeleteOptions.FilenameOptions.Filenames = []string{fileName} - o.Namespace = obj.GetNamespace() - o.DeleteOptions.ForceDeletion = force - o.DryRunStrategy = dryRunStrategy - if manager != "" { - o.FieldManager = manager - } - if serverSideApply || serverSideDiff { + + if serverSideApply { o.ForceConflicts = true } return o, nil @@ -504,9 +616,9 @@ func (k *kubectlResourceOperations) authReconcile(ctx context.Context, obj *unst return strings.Join(out, ". 
"), nil } -func (k *kubectlResourceOperations) processKubectlRun(cmd string) (CleanupFunc, error) { - if k.onKubectlRun != nil { - return k.onKubectlRun(cmd) +func processKubectlRun(onKubectlRun OnKubectlRunFunc, cmd string) (CleanupFunc, error) { + if onKubectlRun != nil { + return onKubectlRun(cmd) } return func() {}, nil } diff --git a/vendor/github.com/argoproj/pkg/time/time.go b/vendor/github.com/argoproj/pkg/time/time.go deleted file mode 100644 index 4b75cf856..000000000 --- a/vendor/github.com/argoproj/pkg/time/time.go +++ /dev/null @@ -1,47 +0,0 @@ -package time - -import ( - "log" - "regexp" - "strconv" - "time" - - "github.com/pkg/errors" -) - -var durationRegex = regexp.MustCompile(`^(\d+)([smhd])$`) - -// ParseDuration parses a duration string and returns the time.Duration -func ParseDuration(duration string) (*time.Duration, error) { - matches := durationRegex.FindStringSubmatch(duration) - if len(matches) != 3 { - return nil, errors.Errorf("Invalid since format '%s'. Expected format (e.g. 
3h)\n", duration) - } - amount, err := strconv.ParseInt(matches[1], 10, 64) - if err != nil { - log.Fatal(err) - } - var unit time.Duration - switch matches[2] { - case "s": - unit = time.Second - case "m": - unit = time.Minute - case "h": - unit = time.Hour - case "d": - unit = time.Hour * 24 - } - dur := unit * time.Duration(amount) - return &dur, nil -} - -// ParseSince parses a duration string and returns a time.Time in history relative to current time -func ParseSince(duration string) (*time.Time, error) { - dur, err := ParseDuration(duration) - if err != nil { - return nil, err - } - since := time.Now().UTC().Add(-*dur) - return &since, nil -} diff --git a/vendor/github.com/bmatcuk/doublestar/v4/README.md b/vendor/github.com/bmatcuk/doublestar/v4/README.md index 70117eff2..21929a954 100644 --- a/vendor/github.com/bmatcuk/doublestar/v4/README.md +++ b/vendor/github.com/bmatcuk/doublestar/v4/README.md @@ -89,6 +89,19 @@ Note: users should _not_ count on the returned error, `doublestar.ErrBadPattern`, being equal to `path.ErrBadPattern`. +### MatchUnvalidated + +```go +func MatchUnvalidated(pattern, name string) bool +``` + +MatchUnvalidated can provide a small performance improvement if you don't care +about whether or not the pattern is valid (perhaps because you already ran +`ValidatePattern`). Note that there's really only one case where this +performance improvement is realized: when pattern matching reaches the end of +`name` before reaching the end of `pattern`, such as `Match("a/b/c", "a")`. + + ### PathMatch ```go @@ -105,6 +118,20 @@ that both `pattern` and `name` are using the system's path separator. If you can't be sure of that, use `filepath.ToSlash()` on both `pattern` and `name`, and then use the `Match()` function instead. 
+ +### PathMatchUnvalidated + +```go +func PathMatchUnvalidated(pattern, name string) bool +``` + +PathMatchUnvalidated can provide a small performance improvement if you don't +care about whether or not the pattern is valid (perhaps because you already ran +`ValidatePattern`). Note that there's really only one case where this +performance improvement is realized: when pattern matching reaches the end of +`name` before reaching the end of `pattern`, such as `Match("a/b/c", "a")`. + + ### GlobOption Options that may be passed to `Glob`, `GlobWalk`, or `FilepathGlob`. Any number @@ -387,6 +414,8 @@ ever since. In that time, it has grown into one of the most popular globbing libraries in the Go ecosystem. So, if **doublestar** is a useful library in your project, consider [sponsoring] my work! I'd really appreciate it! +[![MASV](../sponsors/MASV.png?raw=true)](https://massive.io/) + Thanks for sponsoring me! ## License diff --git a/vendor/github.com/bmatcuk/doublestar/v4/match.go b/vendor/github.com/bmatcuk/doublestar/v4/match.go index 4232c79f3..c0f20afa4 100644 --- a/vendor/github.com/bmatcuk/doublestar/v4/match.go +++ b/vendor/github.com/bmatcuk/doublestar/v4/match.go @@ -53,6 +53,17 @@ func Match(pattern, name string) (bool, error) { return matchWithSeparator(pattern, name, '/', true) } +// MatchUnvalidated can provide a small performance improvement if you don't +// care about whether or not the pattern is valid (perhaps because you already +// ran `ValidatePattern`). Note that there's really only one case where this +// performance improvement is realized: when pattern matching reaches the end +// of `name` before reaching the end of `pattern`, such as `Match("a/b/c", +// "a")`. +func MatchUnvalidated(pattern, name string) bool { + matched, _ := matchWithSeparator(pattern, name, '/', false) + return matched +} + // PathMatch returns true if `name` matches the file name `pattern`. 
The // difference between Match and PathMatch is that PathMatch will automatically // use your system's path separator to split `name` and `pattern`. On systems @@ -67,6 +78,17 @@ func PathMatch(pattern, name string) (bool, error) { return matchWithSeparator(pattern, name, filepath.Separator, true) } +// PathMatchUnvalidated can provide a small performance improvement if you +// don't care about whether or not the pattern is valid (perhaps because you +// already ran `ValidatePattern`). Note that there's really only one case where +// this performance improvement is realized: when pattern matching reaches the +// end of `name` before reaching the end of `pattern`, such as `Match("a/b/c", +// "a")`. +func PathMatchUnvalidated(pattern, name string) bool { + matched, _ := matchWithSeparator(pattern, name, filepath.Separator, false) + return matched +} + func matchWithSeparator(pattern, name string, separator rune, validate bool) (matched bool, err error) { return doMatchWithSeparator(pattern, name, separator, validate, -1, -1, -1, -1, 0, 0) } diff --git a/vendor/github.com/bmatcuk/doublestar/v4/utils.go b/vendor/github.com/bmatcuk/doublestar/v4/utils.go index 0ab1dc98f..6b8df9a38 100644 --- a/vendor/github.com/bmatcuk/doublestar/v4/utils.go +++ b/vendor/github.com/bmatcuk/doublestar/v4/utils.go @@ -84,6 +84,16 @@ func SplitPattern(p string) (base, pattern string) { // filepath.ErrBadPattern. // func FilepathGlob(pattern string, opts ...GlobOption) (matches []string, err error) { + if pattern == "" { + // special case to match filepath.Glob behavior + g := newGlob(opts...) 
+ if g.failOnIOErrors { + // match doublestar.Glob behavior here + return nil, os.ErrInvalid + } + return nil, nil + } + pattern = filepath.Clean(pattern) pattern = filepath.ToSlash(pattern) base, f := SplitPattern(pattern) diff --git a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go index 5518005dc..8541d206a 100644 --- a/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go +++ b/vendor/github.com/bradleyfalzon/ghinstallation/v2/transport.go @@ -13,7 +13,7 @@ import ( "sync" "time" - "github.com/google/go-github/v62/github" + "github.com/google/go-github/v66/github" ) const ( diff --git a/vendor/github.com/casbin/casbin/v2/.gitignore b/vendor/github.com/casbin/casbin/v2/.gitignore new file mode 100644 index 000000000..da27805f5 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/.gitignore @@ -0,0 +1,30 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +.idea/ +*.iml + +# vendor files +vendor diff --git a/vendor/github.com/casbin/casbin/v2/.golangci.yml b/vendor/github.com/casbin/casbin/v2/.golangci.yml new file mode 100644 index 000000000..b8d362019 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/.golangci.yml @@ -0,0 +1,354 @@ +# Based on https://gist.github.com/maratori/47a4d00457a92aa426dbd48a18776322 +# This code is licensed under the terms of the MIT license https://opensource.org/license/mit +# Copyright (c) 2021 Marat Reymers + +## Golden config for golangci-lint v1.56.2 +# +# This is the best config for golangci-lint based on my experience and opinion. +# It is very strict, but not extremely strict. +# Feel free to adapt and change it for your needs. + +run: + # Timeout for analysis, e.g. 30s, 5m. 
+ # Default: 1m + timeout: 3m + + +# This file contains only configs which differ from defaults. +# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml +linters-settings: + cyclop: + # The maximal code complexity to report. + # Default: 10 + max-complexity: 30 + # The maximal average package complexity. + # If it's higher than 0.0 (float) the check is enabled + # Default: 0.0 + package-average: 10.0 + + errcheck: + # Report about not checking of errors in type assertions: `a := b.(MyStruct)`. + # Such cases aren't reported by default. + # Default: false + check-type-assertions: true + + exhaustive: + # Program elements to check for exhaustiveness. + # Default: [ switch ] + check: + - switch + - map + + exhaustruct: + # List of regular expressions to exclude struct packages and their names from checks. + # Regular expressions must match complete canonical struct package/name/structname. + # Default: [] + exclude: + # std libs + - "^net/http.Client$" + - "^net/http.Cookie$" + - "^net/http.Request$" + - "^net/http.Response$" + - "^net/http.Server$" + - "^net/http.Transport$" + - "^net/url.URL$" + - "^os/exec.Cmd$" + - "^reflect.StructField$" + # public libs + - "^github.com/Shopify/sarama.Config$" + - "^github.com/Shopify/sarama.ProducerMessage$" + - "^github.com/mitchellh/mapstructure.DecoderConfig$" + - "^github.com/prometheus/client_golang/.+Opts$" + - "^github.com/spf13/cobra.Command$" + - "^github.com/spf13/cobra.CompletionOptions$" + - "^github.com/stretchr/testify/mock.Mock$" + - "^github.com/testcontainers/testcontainers-go.+Request$" + - "^github.com/testcontainers/testcontainers-go.FromDockerfile$" + - "^golang.org/x/tools/go/analysis.Analyzer$" + - "^google.golang.org/protobuf/.+Options$" + - "^gopkg.in/yaml.v3.Node$" + + funlen: + # Checks the number of lines in a function. + # If lower than 0, disable the check. + # Default: 60 + lines: 100 + # Checks the number of statements in a function. 
+ # If lower than 0, disable the check. + # Default: 40 + statements: 50 + # Ignore comments when counting lines. + # Default false + ignore-comments: true + + gocognit: + # Minimal code complexity to report. + # Default: 30 (but we recommend 10-20) + min-complexity: 20 + + gocritic: + # Settings passed to gocritic. + # The settings key is the name of a supported gocritic checker. + # The list of supported checkers can be find in https://go-critic.github.io/overview. + settings: + captLocal: + # Whether to restrict checker to params only. + # Default: true + paramsOnly: false + underef: + # Whether to skip (*x).method() calls where x is a pointer receiver. + # Default: true + skipRecvDeref: false + + gomnd: + # List of function patterns to exclude from analysis. + # Values always ignored: `time.Date`, + # `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`, + # `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`. + # Default: [] + ignored-functions: + - flag.Arg + - flag.Duration.* + - flag.Float.* + - flag.Int.* + - flag.Uint.* + - os.Chmod + - os.Mkdir.* + - os.OpenFile + - os.WriteFile + - prometheus.ExponentialBuckets.* + - prometheus.LinearBuckets + + gomodguard: + blocked: + # List of blocked modules. + # Default: [] + modules: + - github.com/golang/protobuf: + recommendations: + - google.golang.org/protobuf + reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules" + - github.com/satori/go.uuid: + recommendations: + - github.com/google/uuid + reason: "satori's package is not maintained" + - github.com/gofrs/uuid: + recommendations: + - github.com/gofrs/uuid/v5 + reason: "gofrs' package was not go module before v5" + + govet: + # Enable all analyzers. + # Default: false + enable-all: true + # Disable analyzers by name. + # Run `go tool vet help` to see all analyzers. + # Default: [] + disable: + - fieldalignment # too strict + # Settings per analyzer. 
+ settings: + shadow: + # Whether to be strict about shadowing; can be noisy. + # Default: false + #strict: true + + inamedparam: + # Skips check for interface methods with only a single parameter. + # Default: false + skip-single-param: true + + nakedret: + # Make an issue if func has more lines of code than this setting, and it has naked returns. + # Default: 30 + max-func-lines: 0 + + nolintlint: + # Exclude following linters from requiring an explanation. + # Default: [] + allow-no-explanation: [ funlen, gocognit, lll ] + # Enable to require an explanation of nonzero length after each nolint directive. + # Default: false + require-explanation: true + # Enable to require nolint directives to mention the specific linter being suppressed. + # Default: false + require-specific: true + + perfsprint: + # Optimizes into strings concatenation. + # Default: true + strconcat: false + + rowserrcheck: + # database/sql is always checked + # Default: [] + packages: + - github.com/jmoiron/sqlx + + tenv: + # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures. + # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked. 
+ # Default: false + all: true + + stylecheck: + # STxxxx checks in https://staticcheck.io/docs/configuration/options/#checks + # Default: ["*"] + checks: ["all", "-ST1003"] + + revive: + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + disabled: true + +linters: + disable-all: true + enable: + ## enabled by default + #- errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases + - gosimple # specializes in simplifying a code + - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string + - ineffassign # detects when assignments to existing variables are not used + - staticcheck # is a go vet on steroids, applying a ton of static analysis checks + - typecheck # like the front-end of a Go compiler, parses and type-checks Go code + - unused # checks for unused constants, variables, functions and types + ## disabled by default + - asasalint # checks for pass []any as any in variadic func(...any) + - asciicheck # checks that your code does not contain non-ASCII identifiers + - bidichk # checks for dangerous unicode character sequences + - bodyclose # checks whether HTTP response body is closed successfully + - cyclop # checks function and package cyclomatic complexity + - dupl # tool for code clone detection + - durationcheck # checks for two durations multiplied together + - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error + #- errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13 + - execinquery # checks query string in Query function which reads your Go src files and warning it finds + - exhaustive # checks exhaustiveness of enum switch statements + - exportloopref # checks for pointers to enclosing loop variables + #- forbidigo # forbids identifiers + - funlen # tool for detection of long 
functions + - gocheckcompilerdirectives # validates go compiler directive comments (//go:) + #- gochecknoglobals # checks that no global variables exist + - gochecknoinits # checks that no init functions are present in Go code + - gochecksumtype # checks exhaustiveness on Go "sum types" + #- gocognit # computes and checks the cognitive complexity of functions + #- goconst # finds repeated strings that could be replaced by a constant + #- gocritic # provides diagnostics that check for bugs, performance and style issues + - gocyclo # computes and checks the cyclomatic complexity of functions + - godot # checks if comments end in a period + - goimports # in addition to fixing imports, goimports also formats your code in the same style as gofmt + #- gomnd # detects magic numbers + - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod + - gomodguard # allow and block lists linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations + - goprintffuncname # checks that printf-like functions are named with f at the end + - gosec # inspects source code for security problems + #- lll # reports long lines + - loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap) + - makezero # finds slice declarations with non-zero initial length + - mirror # reports wrong mirror patterns of bytes/strings usage + - musttag # enforces field tags in (un)marshaled structs + - nakedret # finds naked returns in functions greater than a specified function length + - nestif # reports deeply nested if statements + - nilerr # finds the code that returns nil even if it checks that the error is not nil + #- nilnil # checks that there is no simultaneous return of nil error and an invalid value + - noctx # finds sending http request without context.Context + - nolintlint # reports ill-formed or insufficient nolint 
directives + #- nonamedreturns # reports all named returns + - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL + #- perfsprint # checks that fmt.Sprintf can be replaced with a faster alternative + - predeclared # finds code that shadows one of Go's predeclared identifiers + - promlinter # checks Prometheus metrics naming via promlint + - protogetter # reports direct reads from proto message fields when getters should be used + - reassign # checks that package variables are not reassigned + - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint + - rowserrcheck # checks whether Err of rows is checked successfully + - sloglint # ensure consistent code style when using log/slog + - spancheck # checks for mistakes with OpenTelemetry/Census spans + - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed + - stylecheck # is a replacement for golint + - tenv # detects using os.Setenv instead of t.Setenv since Go1.17 + - testableexamples # checks if examples are testable (have an expected output) + - testifylint # checks usage of github.com/stretchr/testify + #- testpackage # makes you use a separate _test package + - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes + - unconvert # removes unnecessary type conversions + #- unparam # reports unused function parameters + - usestdlibvars # detects the possibility to use variables/constants from the Go standard library + - wastedassign # finds wasted assignment statements + - whitespace # detects leading and trailing whitespace + + ## you may want to enable + #- decorder # checks declaration order and count of types, constants, variables and functions + #- exhaustruct # [highly recommend to enable] checks if all structure fields are initialized + #- gci # controls golang package import order and makes it always deterministic + #- ginkgolinter # [if you use ginkgo/gomega] enforces standards 
of using ginkgo and gomega + #- godox # detects FIXME, TODO and other comment keywords + #- goheader # checks is file header matches to pattern + #- inamedparam # [great idea, but too strict, need to ignore a lot of cases by default] reports interfaces with unnamed method parameters + #- interfacebloat # checks the number of methods inside an interface + #- ireturn # accept interfaces, return concrete types + #- prealloc # [premature optimization, but can be used in some cases] finds slice declarations that could potentially be preallocated + #- tagalign # checks that struct tags are well aligned + #- varnamelen # [great idea, but too many false positives] checks that the length of a variable's name matches its scope + #- wrapcheck # checks that errors returned from external packages are wrapped + #- zerologlint # detects the wrong usage of zerolog that a user forgets to dispatch zerolog.Event + + ## disabled + #- containedctx # detects struct contained context.Context field + #- contextcheck # [too many false positives] checks the function whether use a non-inherited context + #- depguard # [replaced by gomodguard] checks if package imports are in a list of acceptable packages + #- dogsled # checks assignments with too many blank identifiers (e.g. x, _, _, _, := f()) + #- dupword # [useless without config] checks for duplicate words in the source code + #- errchkjson # [don't see profit + I'm against of omitting errors like in the first example https://github.com/breml/errchkjson] checks types passed to the json encoding functions. 
Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted + #- forcetypeassert # [replaced by errcheck] finds forced type assertions + #- goerr113 # [too strict] checks the errors handling expressions + #- gofmt # [replaced by goimports] checks whether code was gofmt-ed + #- gofumpt # [replaced by goimports, gofumports is not available yet] checks whether code was gofumpt-ed + #- gosmopolitan # reports certain i18n/l10n anti-patterns in your Go codebase + #- grouper # analyzes expression groups + #- importas # enforces consistent import aliases + #- maintidx # measures the maintainability index of each function + #- misspell # [useless] finds commonly misspelled English words in comments + #- nlreturn # [too strict and mostly code is not more readable] checks for a new line before return and branch statements to increase code clarity + #- paralleltest # [too many false positives] detects missing usage of t.Parallel() method in your Go test + #- tagliatelle # checks the struct tags + #- thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers + #- wsl # [too strict and mostly code is not more readable] whitespace linter forces you to use empty lines + + ## deprecated + #- deadcode # [deprecated, replaced by unused] finds unused code + #- exhaustivestruct # [deprecated, replaced by exhaustruct] checks if all struct's fields are initialized + #- golint # [deprecated, replaced by revive] golint differs from gofmt. 
Gofmt reformats Go source code, whereas golint prints out style mistakes + #- ifshort # [deprecated] checks that your code uses short syntax for if-statements whenever possible + #- interfacer # [deprecated] suggests narrower interface types + #- maligned # [deprecated, replaced by govet fieldalignment] detects Go structs that would take less memory if their fields were sorted + #- nosnakecase # [deprecated, replaced by revive var-naming] detects snake case of variable naming and function name + #- scopelint # [deprecated, replaced by exportloopref] checks for unpinned variables in go programs + #- structcheck # [deprecated, replaced by unused] finds unused struct fields + #- varcheck # [deprecated, replaced by unused] finds unused global variables and constants + + +issues: + # Maximum count of issues with the same text. + # Set to 0 to disable. + # Default: 3 + max-same-issues: 50 + + exclude-rules: + - source: "(noinspection|TODO)" + linters: [ godot ] + - source: "//noinspection" + linters: [ gocritic ] + - path: "_test\\.go" + linters: + - bodyclose + - dupl + - funlen + - goconst + - gosec + - noctx + - wrapcheck + # TODO: remove after PR is released https://github.com/golangci/golangci-lint/pull/4386 + - text: "fmt.Sprintf can be replaced with string addition" + linters: [ perfsprint ] \ No newline at end of file diff --git a/vendor/github.com/casbin/casbin/v2/.releaserc.json b/vendor/github.com/casbin/casbin/v2/.releaserc.json new file mode 100644 index 000000000..58cb0bb4c --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/.releaserc.json @@ -0,0 +1,16 @@ +{ + "debug": true, + "branches": [ + "+([0-9])?(.{+([0-9]),x}).x", + "master", + { + "name": "beta", + "prerelease": true + } + ], + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + "@semantic-release/github" + ] +} diff --git a/vendor/github.com/casbin/casbin/v2/.travis.yml b/vendor/github.com/casbin/casbin/v2/.travis.yml new file mode 100644 index 
000000000..cea21652e --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/.travis.yml @@ -0,0 +1,15 @@ +language: go + +sudo: false + +env: + - GO111MODULE=on + +go: + - "1.11.13" + - "1.12" + - "1.13" + - "1.14" + +script: + - make test diff --git a/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md b/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md new file mode 100644 index 000000000..4bab59c93 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md @@ -0,0 +1,35 @@ +# How to contribute + +The following is a set of guidelines for contributing to casbin and its libraries, which are hosted at [casbin organization at Github](https://github.com/casbin). + +This project adheres to the [Contributor Covenant 1.2.](https://www.contributor-covenant.org/version/1/2/0/code-of-conduct.html) By participating, you are expected to uphold this code. Please report unacceptable behavior to info@casbin.com. + +## Questions + +- We do our best to have an [up-to-date documentation](https://casbin.org/docs/overview) +- [Stack Overflow](https://stackoverflow.com) is the best place to start if you have a question. Please use the [casbin tag](https://stackoverflow.com/tags/casbin/info) we are actively monitoring. We encourage you to use Stack Overflow specially for Modeling Access Control Problems, in order to build a shared knowledge base. +- You can also join our [Discord](https://discord.gg/S5UjpzGZjN). + +## Reporting issues + +Reporting issues are a great way to contribute to the project. We are perpetually grateful about a well-written, through bug report. + +Before raising a new issue, check our [issue list](https://github.com/casbin/casbin/issues) to determine if it already contains the problem that you are facing. + +A good bug report shouldn't leave others needing to chase you for more information. Please be as detailed as possible. The following questions might serve as a template for writing a detailed report: + +What were you trying to achieve? 
+What are the expected results? +What are the received results? +What are the steps to reproduce the issue? +In what environment did you encounter the issue? + +Feature requests can also be submitted as issues. + +## Pull requests + +Good pull requests (e.g. patches, improvements, new features) are a fantastic help. They should remain focused in scope and avoid unrelated commits. + +Please ask first before embarking on any significant pull request (e.g. implementing new features, refactoring code etc.), otherwise you risk spending a lot of time working on something that the maintainers might not want to merge into the project. + +First add an issue to the project to discuss the improvement. Please adhere to the coding conventions used throughout the project. If in doubt, consult the [Effective Go style guide](https://golang.org/doc/effective_go.html). diff --git a/vendor/github.com/casbin/casbin/v2/LICENSE b/vendor/github.com/casbin/casbin/v2/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/casbin/casbin/v2/Makefile b/vendor/github.com/casbin/casbin/v2/Makefile new file mode 100644 index 000000000..64546af2a --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/Makefile @@ -0,0 +1,16 @@ +SHELL = /bin/bash +export PATH := $(shell yarn global bin):$(PATH) + +default: lint test + +test: + go test -race -v ./... + +benchmark: + go test -bench=. + +lint: + golangci-lint run --verbose + +release: + npx semantic-release@v19.0.2 diff --git a/vendor/github.com/casbin/casbin/v2/README.md b/vendor/github.com/casbin/casbin/v2/README.md new file mode 100644 index 000000000..36549f55f --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/README.md @@ -0,0 +1,296 @@ +Casbin +==== + +[![Go Report Card](https://goreportcard.com/badge/github.com/casbin/casbin)](https://goreportcard.com/report/github.com/casbin/casbin) +[![Build](https://github.com/casbin/casbin/actions/workflows/default.yml/badge.svg)](https://github.com/casbin/casbin/actions/workflows/default.yml) +[![Coverage Status](https://coveralls.io/repos/github/casbin/casbin/badge.svg?branch=master)](https://coveralls.io/github/casbin/casbin?branch=master) +[![Godoc](https://godoc.org/github.com/casbin/casbin?status.svg)](https://pkg.go.dev/github.com/casbin/casbin/v2) +[![Release](https://img.shields.io/github/release/casbin/casbin.svg)](https://github.com/casbin/casbin/releases/latest) 
+[![Discord](https://img.shields.io/discord/1022748306096537660?logo=discord&label=discord&color=5865F2)](https://discord.gg/S5UjpzGZjN) +[![Sourcegraph](https://sourcegraph.com/github.com/casbin/casbin/-/badge.svg)](https://sourcegraph.com/github.com/casbin/casbin?badge) + +**News**: still worry about how to write the correct Casbin policy? ``Casbin online editor`` is coming to help! Try it at: https://casbin.org/editor/ + +![casbin Logo](casbin-logo.png) + +Casbin is a powerful and efficient open-source access control library for Golang projects. It provides support for enforcing authorization based on various [access control models](https://en.wikipedia.org/wiki/Computer_security_model). + +

+ Sponsored by +
+ + + + + + +
+ Build auth with fraud prevention, faster.
Try Stytch for API-first authentication, user & org management, multi-tenant SSO, MFA, device fingerprinting, and more.
+
+

+ +## All the languages supported by Casbin: + +| [![golang](https://casbin.org/img/langs/golang.png)](https://github.com/casbin/casbin) | [![java](https://casbin.org/img/langs/java.png)](https://github.com/casbin/jcasbin) | [![nodejs](https://casbin.org/img/langs/nodejs.png)](https://github.com/casbin/node-casbin) | [![php](https://casbin.org/img/langs/php.png)](https://github.com/php-casbin/php-casbin) | +|----------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------| +| [Casbin](https://github.com/casbin/casbin) | [jCasbin](https://github.com/casbin/jcasbin) | [node-Casbin](https://github.com/casbin/node-casbin) | [PHP-Casbin](https://github.com/php-casbin/php-casbin) | +| production-ready | production-ready | production-ready | production-ready | + +| [![python](https://casbin.org/img/langs/python.png)](https://github.com/casbin/pycasbin) | [![dotnet](https://casbin.org/img/langs/dotnet.png)](https://github.com/casbin-net/Casbin.NET) | [![c++](https://casbin.org/img/langs/cpp.png)](https://github.com/casbin/casbin-cpp) | [![rust](https://casbin.org/img/langs/rust.png)](https://github.com/casbin/casbin-rs) | +|------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------| +| [PyCasbin](https://github.com/casbin/pycasbin) | [Casbin.NET](https://github.com/casbin-net/Casbin.NET) | [Casbin-CPP](https://github.com/casbin/casbin-cpp) | 
[Casbin-RS](https://github.com/casbin/casbin-rs) | +| production-ready | production-ready | production-ready | production-ready | + +## Table of contents + +- [Supported models](#supported-models) +- [How it works?](#how-it-works) +- [Features](#features) +- [Installation](#installation) +- [Documentation](#documentation) +- [Online editor](#online-editor) +- [Tutorials](#tutorials) +- [Get started](#get-started) +- [Policy management](#policy-management) +- [Policy persistence](#policy-persistence) +- [Policy consistence between multiple nodes](#policy-consistence-between-multiple-nodes) +- [Role manager](#role-manager) +- [Benchmarks](#benchmarks) +- [Examples](#examples) +- [Middlewares](#middlewares) +- [Our adopters](#our-adopters) + +## Supported models + +1. [**ACL (Access Control List)**](https://en.wikipedia.org/wiki/Access_control_list) +2. **ACL with [superuser](https://en.wikipedia.org/wiki/Superuser)** +3. **ACL without users**: especially useful for systems that don't have authentication or user log-ins. +3. **ACL without resources**: some scenarios may target for a type of resources instead of an individual resource by using permissions like ``write-article``, ``read-log``. It doesn't control the access to a specific article or log. +4. **[RBAC (Role-Based Access Control)](https://en.wikipedia.org/wiki/Role-based_access_control)** +5. **RBAC with resource roles**: both users and resources can have roles (or groups) at the same time. +6. **RBAC with domains/tenants**: users can have different role sets for different domains/tenants. +7. **[ABAC (Attribute-Based Access Control)](https://en.wikipedia.org/wiki/Attribute-Based_Access_Control)**: syntax sugar like ``resource.Owner`` can be used to get the attribute for a resource. +8. **[RESTful](https://en.wikipedia.org/wiki/Representational_state_transfer)**: supports paths like ``/res/*``, ``/res/:id`` and HTTP methods like ``GET``, ``POST``, ``PUT``, ``DELETE``. +9. 
**Deny-override**: both allow and deny authorizations are supported, deny overrides the allow. +10. **Priority**: the policy rules can be prioritized like firewall rules. + +## How it works? + +In Casbin, an access control model is abstracted into a CONF file based on the **PERM metamodel (Policy, Effect, Request, Matchers)**. So switching or upgrading the authorization mechanism for a project is just as simple as modifying a configuration. You can customize your own access control model by combining the available models. For example, you can get RBAC roles and ABAC attributes together inside one model and share one set of policy rules. + +The most basic and simplest model in Casbin is ACL. ACL's model CONF is: + +```ini +# Request definition +[request_definition] +r = sub, obj, act + +# Policy definition +[policy_definition] +p = sub, obj, act + +# Policy effect +[policy_effect] +e = some(where (p.eft == allow)) + +# Matchers +[matchers] +m = r.sub == p.sub && r.obj == p.obj && r.act == p.act + +``` + +An example policy for ACL model is like: + +``` +p, alice, data1, read +p, bob, data2, write +``` + +It means: + +- alice can read data1 +- bob can write data2 + +We also support multi-line mode by appending '\\' in the end: + +```ini +# Matchers +[matchers] +m = r.sub == p.sub && r.obj == p.obj \ + && r.act == p.act +``` + +Further more, if you are using ABAC, you can try operator `in` like following in Casbin **golang** edition (jCasbin and Node-Casbin are not supported yet): + +```ini +# Matchers +[matchers] +m = r.obj == p.obj && r.act == p.act || r.obj in ('data2', 'data3') +``` + +But you **SHOULD** make sure that the length of the array is **MORE** than **1**, otherwise there will cause it to panic. + +For more operators, you may take a look at [govaluate](https://github.com/casbin/govaluate) + +## Features + +What Casbin does: + +1. 
enforce the policy in the classic ``{subject, object, action}`` form or a customized form as you defined, both allow and deny authorizations are supported. +2. handle the storage of the access control model and its policy. +3. manage the role-user mappings and role-role mappings (aka role hierarchy in RBAC). +4. support built-in superuser like ``root`` or ``administrator``. A superuser can do anything without explicit permissions. +5. multiple built-in operators to support the rule matching. For example, ``keyMatch`` can map a resource key ``/foo/bar`` to the pattern ``/foo*``. + +What Casbin does NOT do: + +1. authentication (aka verify ``username`` and ``password`` when a user logs in) +2. manage the list of users or roles. I believe it's more convenient for the project itself to manage these entities. Users usually have their passwords, and Casbin is not designed as a password container. However, Casbin stores the user-role mapping for the RBAC scenario. + +## Installation + +``` +go get github.com/casbin/casbin/v2 +``` + +## Documentation + +https://casbin.org/docs/overview + +## Online editor + +You can also use the online editor (https://casbin.org/editor/) to write your Casbin model and policy in your web browser. It provides functionality such as ``syntax highlighting`` and ``code completion``, just like an IDE for a programming language. + +## Tutorials + +https://casbin.org/docs/tutorials + +## Get started + +1. New a Casbin enforcer with a model file and a policy file: + + ```go + e, _ := casbin.NewEnforcer("path/to/model.conf", "path/to/policy.csv") + ``` + +Note: you can also initialize an enforcer with policy in DB instead of file, see [Policy-persistence](#policy-persistence) section for details. + +2. Add an enforcement hook into your code right before the access happens: + + ```go + sub := "alice" // the user that wants to access a resource. + obj := "data1" // the resource that is going to be accessed. 
+ act := "read" // the operation that the user performs on the resource. + + if res, _ := e.Enforce(sub, obj, act); res { + // permit alice to read data1 + } else { + // deny the request, show an error + } + ``` + +3. Besides the static policy file, Casbin also provides API for permission management at run-time. For example, You can get all the roles assigned to a user as below: + + ```go + roles, _ := e.GetImplicitRolesForUser(sub) + ``` + +See [Policy management APIs](#policy-management) for more usage. + +## Policy management + +Casbin provides two sets of APIs to manage permissions: + +- [Management API](https://casbin.org/docs/management-api): the primitive API that provides full support for Casbin policy management. +- [RBAC API](https://casbin.org/docs/rbac-api): a more friendly API for RBAC. This API is a subset of Management API. The RBAC users could use this API to simplify the code. + +We also provide a [web-based UI](https://casbin.org/docs/admin-portal) for model management and policy management: + +![model editor](https://hsluoyz.github.io/casbin/ui_model_editor.png) + +![policy editor](https://hsluoyz.github.io/casbin/ui_policy_editor.png) + +## Policy persistence + +https://casbin.org/docs/adapters + +## Policy consistence between multiple nodes + +https://casbin.org/docs/watchers + +## Role manager + +https://casbin.org/docs/role-managers + +## Benchmarks + +https://casbin.org/docs/benchmark + +## Examples + +| Model | Model file | Policy file | +|---------------------------|----------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------| +| ACL | [basic_model.conf](https://github.com/casbin/casbin/blob/master/examples/basic_model.conf) | [basic_policy.csv](https://github.com/casbin/casbin/blob/master/examples/basic_policy.csv) | +| ACL with superuser | 
[basic_model_with_root.conf](https://github.com/casbin/casbin/blob/master/examples/basic_with_root_model.conf) | [basic_policy.csv](https://github.com/casbin/casbin/blob/master/examples/basic_policy.csv) | +| ACL without users | [basic_model_without_users.conf](https://github.com/casbin/casbin/blob/master/examples/basic_without_users_model.conf) | [basic_policy_without_users.csv](https://github.com/casbin/casbin/blob/master/examples/basic_without_users_policy.csv) | +| ACL without resources | [basic_model_without_resources.conf](https://github.com/casbin/casbin/blob/master/examples/basic_without_resources_model.conf) | [basic_policy_without_resources.csv](https://github.com/casbin/casbin/blob/master/examples/basic_without_resources_policy.csv) | +| RBAC | [rbac_model.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_model.conf) | [rbac_policy.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_policy.csv) | +| RBAC with resource roles | [rbac_model_with_resource_roles.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_resource_roles_model.conf) | [rbac_policy_with_resource_roles.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_resource_roles_policy.csv) | +| RBAC with domains/tenants | [rbac_model_with_domains.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_domains_model.conf) | [rbac_policy_with_domains.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_domains_policy.csv) | +| ABAC | [abac_model.conf](https://github.com/casbin/casbin/blob/master/examples/abac_model.conf) | N/A | +| RESTful | [keymatch_model.conf](https://github.com/casbin/casbin/blob/master/examples/keymatch_model.conf) | [keymatch_policy.csv](https://github.com/casbin/casbin/blob/master/examples/keymatch_policy.csv) | +| Deny-override | [rbac_model_with_deny.conf](https://github.com/casbin/casbin/blob/master/examples/rbac_with_deny_model.conf) | 
[rbac_policy_with_deny.csv](https://github.com/casbin/casbin/blob/master/examples/rbac_with_deny_policy.csv) | +| Priority | [priority_model.conf](https://github.com/casbin/casbin/blob/master/examples/priority_model.conf) | [priority_policy.csv](https://github.com/casbin/casbin/blob/master/examples/priority_policy.csv) | + +## Middlewares + +Authz middlewares for web frameworks: https://casbin.org/docs/middlewares + +## Our adopters + +https://casbin.org/docs/adopters + +## How to Contribute + +Please read the [contributing guide](CONTRIBUTING.md). + +## Contributors + +This project exists thanks to all the people who contribute. + + +## Backers + +Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/casbin#backer)] + + + +## Sponsors + +Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/casbin#sponsor)] + + + + + + + + + + + + +## Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=casbin/casbin&type=Date)](https://star-history.com/#casbin/casbin&Date) + +## License + +This project is licensed under the [Apache 2.0 license](LICENSE). + +## Contact + +If you have any issues or feature requests, please contact us. PR is welcomed. 
+- https://github.com/casbin/casbin/issues +- hsluoyz@gmail.com +- Tencent QQ group: [546057381](//shang.qq.com/wpa/qunwpa?idkey=8ac8b91fc97ace3d383d0035f7aa06f7d670fd8e8d4837347354a31c18fac885) diff --git a/vendor/github.com/casbin/casbin/v2/casbin-logo.png b/vendor/github.com/casbin/casbin/v2/casbin-logo.png new file mode 100644 index 000000000..7e5d1ecf9 Binary files /dev/null and b/vendor/github.com/casbin/casbin/v2/casbin-logo.png differ diff --git a/vendor/github.com/casbin/casbin/v2/config/config.go b/vendor/github.com/casbin/casbin/v2/config/config.go new file mode 100644 index 000000000..57d40d849 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/config/config.go @@ -0,0 +1,267 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +var ( + // DEFAULT_SECTION specifies the name of a section if no name provided. + DEFAULT_SECTION = "default" + // DEFAULT_COMMENT defines what character(s) indicate a comment `#`. + DEFAULT_COMMENT = []byte{'#'} + // DEFAULT_COMMENT_SEM defines what alternate character(s) indicate a comment `;`. + DEFAULT_COMMENT_SEM = []byte{';'} + // DEFAULT_MULTI_LINE_SEPARATOR defines what character indicates a multi-line content. + DEFAULT_MULTI_LINE_SEPARATOR = []byte{'\\'} +) + +// ConfigInterface defines the behavior of a Config implementation. 
+type ConfigInterface interface { + String(key string) string + Strings(key string) []string + Bool(key string) (bool, error) + Int(key string) (int, error) + Int64(key string) (int64, error) + Float64(key string) (float64, error) + Set(key string, value string) error +} + +// Config represents an implementation of the ConfigInterface. +type Config struct { + // Section:key=value + data map[string]map[string]string +} + +// NewConfig create an empty configuration representation from file. +func NewConfig(confName string) (ConfigInterface, error) { + c := &Config{ + data: make(map[string]map[string]string), + } + err := c.parse(confName) + return c, err +} + +// NewConfigFromText create an empty configuration representation from text. +func NewConfigFromText(text string) (ConfigInterface, error) { + c := &Config{ + data: make(map[string]map[string]string), + } + err := c.parseBuffer(bufio.NewReader(strings.NewReader(text))) + return c, err +} + +// AddConfig adds a new section->key:value to the configuration. 
+func (c *Config) AddConfig(section string, option string, value string) bool { + if section == "" { + section = DEFAULT_SECTION + } + + if _, ok := c.data[section]; !ok { + c.data[section] = make(map[string]string) + } + + _, ok := c.data[section][option] + c.data[section][option] = value + + return !ok +} + +func (c *Config) parse(fname string) (err error) { + f, err := os.Open(fname) + if err != nil { + return err + } + defer f.Close() + + buf := bufio.NewReader(f) + return c.parseBuffer(buf) +} + +func (c *Config) parseBuffer(buf *bufio.Reader) error { + var section string + var lineNum int + var buffer bytes.Buffer + var canWrite bool + for { + if canWrite { + if err := c.write(section, lineNum, &buffer); err != nil { + return err + } else { + canWrite = false + } + } + lineNum++ + line, _, err := buf.ReadLine() + if err == io.EOF { + // force write when buffer is not flushed yet + if buffer.Len() > 0 { + if err = c.write(section, lineNum, &buffer); err != nil { + return err + } + } + break + } else if err != nil { + return err + } + + line = bytes.TrimSpace(line) + switch { + case bytes.Equal(line, []byte{}), bytes.HasPrefix(line, DEFAULT_COMMENT_SEM), + bytes.HasPrefix(line, DEFAULT_COMMENT): + canWrite = true + continue + case bytes.HasPrefix(line, []byte{'['}) && bytes.HasSuffix(line, []byte{']'}): + // force write when buffer is not flushed yet + if buffer.Len() > 0 { + if err := c.write(section, lineNum, &buffer); err != nil { + return err + } + canWrite = false + } + section = string(line[1 : len(line)-1]) + default: + var p []byte + if bytes.HasSuffix(line, DEFAULT_MULTI_LINE_SEPARATOR) { + p = bytes.TrimSpace(line[:len(line)-1]) + p = append(p, " "...) 
+ } else { + p = line + canWrite = true + } + + end := len(p) + for i, value := range p { + if value == DEFAULT_COMMENT[0] || value == DEFAULT_COMMENT_SEM[0] { + end = i + break + } + } + if _, err := buffer.Write(p[:end]); err != nil { + return err + } + } + } + + return nil +} + +func (c *Config) write(section string, lineNum int, b *bytes.Buffer) error { + if b.Len() <= 0 { + return nil + } + + optionVal := bytes.SplitN(b.Bytes(), []byte{'='}, 2) + if len(optionVal) != 2 { + return fmt.Errorf("parse the content error : line %d , %s = ? ", lineNum, optionVal[0]) + } + option := bytes.TrimSpace(optionVal[0]) + value := bytes.TrimSpace(optionVal[1]) + c.AddConfig(section, string(option), string(value)) + + // flush buffer after adding + b.Reset() + + return nil +} + +// Bool lookups up the value using the provided key and converts the value to a bool. +func (c *Config) Bool(key string) (bool, error) { + return strconv.ParseBool(c.get(key)) +} + +// Int lookups up the value using the provided key and converts the value to a int. +func (c *Config) Int(key string) (int, error) { + return strconv.Atoi(c.get(key)) +} + +// Int64 lookups up the value using the provided key and converts the value to a int64. +func (c *Config) Int64(key string) (int64, error) { + return strconv.ParseInt(c.get(key), 10, 64) +} + +// Float64 lookups up the value using the provided key and converts the value to a float64. +func (c *Config) Float64(key string) (float64, error) { + return strconv.ParseFloat(c.get(key), 64) +} + +// String lookups up the value using the provided key and converts the value to a string. +func (c *Config) String(key string) string { + return c.get(key) +} + +// Strings lookups up the value using the provided key and converts the value to an array of string +// by splitting the string by comma. 
+func (c *Config) Strings(key string) []string { + v := c.get(key) + if v == "" { + return nil + } + return strings.Split(v, ",") +} + +// Set sets the value for the specific key in the Config. +func (c *Config) Set(key string, value string) error { + if len(key) == 0 { + return errors.New("key is empty") + } + + var ( + section string + option string + ) + + keys := strings.Split(strings.ToLower(key), "::") + if len(keys) >= 2 { + section = keys[0] + option = keys[1] + } else { + option = keys[0] + } + + c.AddConfig(section, option, value) + return nil +} + +// section.key or key. +func (c *Config) get(key string) string { + var ( + section string + option string + ) + + keys := strings.Split(strings.ToLower(key), "::") + if len(keys) >= 2 { + section = keys[0] + option = keys[1] + } else { + section = DEFAULT_SECTION + option = keys[0] + } + + if value, ok := c.data[section][option]; ok { + return value + } + + return "" +} diff --git a/vendor/github.com/casbin/casbin/v2/constant/constants.go b/vendor/github.com/casbin/casbin/v2/constant/constants.go new file mode 100644 index 000000000..4140ecf3f --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/constant/constants.go @@ -0,0 +1,31 @@ +// Copyright 2022 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package constant + +const ( + ActionIndex = "act" + DomainIndex = "dom" + SubjectIndex = "sub" + ObjectIndex = "obj" + PriorityIndex = "priority" +) + +const ( + AllowOverrideEffect = "some(where (p_eft == allow))" + DenyOverrideEffect = "!some(where (p_eft == deny))" + AllowAndDenyEffect = "some(where (p_eft == allow)) && !some(where (p_eft == deny))" + PriorityEffect = "priority(p_eft) || deny" + SubjectPriorityEffect = "subjectPriority(p_eft) || deny" +) diff --git a/vendor/github.com/casbin/casbin/v2/effector/default_effector.go b/vendor/github.com/casbin/casbin/v2/effector/default_effector.go new file mode 100644 index 000000000..feb083a68 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/effector/default_effector.go @@ -0,0 +1,109 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package effector + +import ( + "errors" + + "github.com/casbin/casbin/v2/constant" +) + +// DefaultEffector is default effector for Casbin. +type DefaultEffector struct { +} + +// NewDefaultEffector is the constructor for DefaultEffector. +func NewDefaultEffector() *DefaultEffector { + e := DefaultEffector{} + return &e +} + +// MergeEffects merges all matching results collected by the enforcer into a single decision. 
+func (e *DefaultEffector) MergeEffects(expr string, effects []Effect, matches []float64, policyIndex int, policyLength int) (Effect, int, error) { + result := Indeterminate + explainIndex := -1 + + switch expr { + case constant.AllowOverrideEffect: + if matches[policyIndex] == 0 { + break + } + // only check the current policyIndex + if effects[policyIndex] == Allow { + result = Allow + explainIndex = policyIndex + break + } + case constant.DenyOverrideEffect: + // only check the current policyIndex + if matches[policyIndex] != 0 && effects[policyIndex] == Deny { + result = Deny + explainIndex = policyIndex + break + } + // if no deny rules are matched at last, then allow + if policyIndex == policyLength-1 { + result = Allow + } + case constant.AllowAndDenyEffect: + // short-circuit if matched deny rule + if matches[policyIndex] != 0 && effects[policyIndex] == Deny { + result = Deny + // set hit rule to the (first) matched deny rule + explainIndex = policyIndex + break + } + + // short-circuit some effects in the middle + if policyIndex < policyLength-1 { + // choose not to short-circuit + return result, explainIndex, nil + } + // merge all effects at last + for i, eft := range effects { + if matches[i] == 0 { + continue + } + + if eft == Allow { + result = Allow + // set hit rule to first matched allow rule + explainIndex = i + break + } + } + case constant.PriorityEffect, constant.SubjectPriorityEffect: + // reverse merge, short-circuit may be earlier + for i := len(effects) - 1; i >= 0; i-- { + if matches[i] == 0 { + continue + } + + if effects[i] != Indeterminate { + if effects[i] == Allow { + result = Allow + } else { + result = Deny + } + explainIndex = i + break + } + } + default: + return Deny, -1, errors.New("unsupported effect") + } + + return result, explainIndex, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/effector/effector.go b/vendor/github.com/casbin/casbin/v2/effector/effector.go new file mode 100644 index 000000000..49b84c3e1 --- 
/dev/null +++ b/vendor/github.com/casbin/casbin/v2/effector/effector.go @@ -0,0 +1,31 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package effector //nolint:cyclop // TODO + +// Effect is the result for a policy rule. +type Effect int + +// Values for policy effect. +const ( + Allow Effect = iota + Indeterminate + Deny +) + +// Effector is the interface for Casbin effectors. +type Effector interface { + // MergeEffects merges all matching results collected by the enforcer into a single decision. + MergeEffects(expr string, effects []Effect, matches []float64, policyIndex int, policyLength int) (Effect, int, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer.go b/vendor/github.com/casbin/casbin/v2/enforcer.go new file mode 100644 index 000000000..fd3f43a8d --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer.go @@ -0,0 +1,1004 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "errors" + "fmt" + "runtime/debug" + "strings" + "sync" + + "github.com/casbin/casbin/v2/effector" + "github.com/casbin/casbin/v2/log" + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" + fileadapter "github.com/casbin/casbin/v2/persist/file-adapter" + "github.com/casbin/casbin/v2/rbac" + defaultrolemanager "github.com/casbin/casbin/v2/rbac/default-role-manager" + "github.com/casbin/casbin/v2/util" + + "github.com/casbin/govaluate" +) + +// Enforcer is the main interface for authorization enforcement and policy management. +type Enforcer struct { + modelPath string + model model.Model + fm model.FunctionMap + eft effector.Effector + + adapter persist.Adapter + watcher persist.Watcher + dispatcher persist.Dispatcher + rmMap map[string]rbac.RoleManager + condRmMap map[string]rbac.ConditionalRoleManager + matcherMap sync.Map + + enabled bool + autoSave bool + autoBuildRoleLinks bool + autoNotifyWatcher bool + autoNotifyDispatcher bool + acceptJsonRequest bool + + logger log.Logger +} + +// EnforceContext is used as the first element of the parameter "rvals" in method "enforce". +type EnforceContext struct { + RType string + PType string + EType string + MType string +} + +func (e EnforceContext) GetCacheKey() string { + return "EnforceContext{" + e.RType + "-" + e.PType + "-" + e.EType + "-" + e.MType + "}" +} + +// NewEnforcer creates an enforcer via file or DB. 
+// +// File: +// +// e := casbin.NewEnforcer("path/to/basic_model.conf", "path/to/basic_policy.csv") +// +// MySQL DB: +// +// a := mysqladapter.NewDBAdapter("mysql", "mysql_username:mysql_password@tcp(127.0.0.1:3306)/") +// e := casbin.NewEnforcer("path/to/basic_model.conf", a) +func NewEnforcer(params ...interface{}) (*Enforcer, error) { + e := &Enforcer{logger: &log.DefaultLogger{}} + + parsedParamLen := 0 + paramLen := len(params) + if paramLen >= 1 { + enableLog, ok := params[paramLen-1].(bool) + if ok { + e.EnableLog(enableLog) + parsedParamLen++ + } + } + + if paramLen-parsedParamLen >= 1 { + logger, ok := params[paramLen-parsedParamLen-1].(log.Logger) + if ok { + e.logger = logger + parsedParamLen++ + } + } + + switch paramLen - parsedParamLen { + case 2: + switch p0 := params[0].(type) { + case string: + switch p1 := params[1].(type) { + case string: + err := e.InitWithFile(p0, p1) + if err != nil { + return nil, err + } + default: + err := e.InitWithAdapter(p0, p1.(persist.Adapter)) + if err != nil { + return nil, err + } + } + default: + switch params[1].(type) { + case string: + return nil, errors.New("invalid parameters for enforcer") + default: + err := e.InitWithModelAndAdapter(p0.(model.Model), params[1].(persist.Adapter)) + if err != nil { + return nil, err + } + } + } + case 1: + switch p0 := params[0].(type) { + case string: + err := e.InitWithFile(p0, "") + if err != nil { + return nil, err + } + default: + err := e.InitWithModelAndAdapter(p0.(model.Model), nil) + if err != nil { + return nil, err + } + } + case 0: + return e, nil + default: + return nil, errors.New("invalid parameters for enforcer") + } + + return e, nil +} + +// InitWithFile initializes an enforcer with a model file and a policy file. +func (e *Enforcer) InitWithFile(modelPath string, policyPath string) error { + a := fileadapter.NewAdapter(policyPath) + return e.InitWithAdapter(modelPath, a) +} + +// InitWithAdapter initializes an enforcer with a database adapter. 
+func (e *Enforcer) InitWithAdapter(modelPath string, adapter persist.Adapter) error { + m, err := model.NewModelFromFile(modelPath) + if err != nil { + return err + } + + err = e.InitWithModelAndAdapter(m, adapter) + if err != nil { + return err + } + + e.modelPath = modelPath + return nil +} + +// InitWithModelAndAdapter initializes an enforcer with a model and a database adapter. +func (e *Enforcer) InitWithModelAndAdapter(m model.Model, adapter persist.Adapter) error { + e.adapter = adapter + + e.model = m + m.SetLogger(e.logger) + e.model.PrintModel() + e.fm = model.LoadFunctionMap() + + e.initialize() + + // Do not initialize the full policy when using a filtered adapter + fa, ok := e.adapter.(persist.FilteredAdapter) + if e.adapter != nil && (!ok || ok && !fa.IsFiltered()) { + err := e.LoadPolicy() + if err != nil { + return err + } + } + + return nil +} + +// SetLogger changes the current enforcer's logger. +func (e *Enforcer) SetLogger(logger log.Logger) { + e.logger = logger + e.model.SetLogger(e.logger) + for k := range e.rmMap { + e.rmMap[k].SetLogger(e.logger) + } + for k := range e.condRmMap { + e.condRmMap[k].SetLogger(e.logger) + } +} + +func (e *Enforcer) initialize() { + e.rmMap = map[string]rbac.RoleManager{} + e.condRmMap = map[string]rbac.ConditionalRoleManager{} + e.eft = effector.NewDefaultEffector() + e.watcher = nil + e.matcherMap = sync.Map{} + + e.enabled = true + e.autoSave = true + e.autoBuildRoleLinks = true + e.autoNotifyWatcher = true + e.autoNotifyDispatcher = true + e.initRmMap() +} + +// LoadModel reloads the model from the model CONF file. +// Because the policy is attached to a model, so the policy is invalidated and needs to be reloaded by calling LoadPolicy(). 
+func (e *Enforcer) LoadModel() error { + var err error + e.model, err = model.NewModelFromFile(e.modelPath) + if err != nil { + return err + } + e.model.SetLogger(e.logger) + + e.model.PrintModel() + e.fm = model.LoadFunctionMap() + + e.initialize() + + return nil +} + +// GetModel gets the current model. +func (e *Enforcer) GetModel() model.Model { + return e.model +} + +// SetModel sets the current model. +func (e *Enforcer) SetModel(m model.Model) { + e.model = m + e.fm = model.LoadFunctionMap() + + e.model.SetLogger(e.logger) + e.initialize() +} + +// GetAdapter gets the current adapter. +func (e *Enforcer) GetAdapter() persist.Adapter { + return e.adapter +} + +// SetAdapter sets the current adapter. +func (e *Enforcer) SetAdapter(adapter persist.Adapter) { + e.adapter = adapter +} + +// SetWatcher sets the current watcher. +func (e *Enforcer) SetWatcher(watcher persist.Watcher) error { + e.watcher = watcher + if _, ok := e.watcher.(persist.WatcherEx); ok { + // The callback of WatcherEx has no generic implementation. + return nil + } else { + // In case the Watcher wants to use a customized callback function, call `SetUpdateCallback` after `SetWatcher`. + return watcher.SetUpdateCallback(func(string) { _ = e.LoadPolicy() }) + } +} + +// GetRoleManager gets the current role manager. +func (e *Enforcer) GetRoleManager() rbac.RoleManager { + if e.rmMap != nil && e.rmMap["g"] != nil { + return e.rmMap["g"] + } else { + return nil + } +} + +// GetNamedRoleManager gets the role manager for the named policy. +func (e *Enforcer) GetNamedRoleManager(ptype string) rbac.RoleManager { + if e.rmMap != nil && e.rmMap[ptype] != nil { + return e.rmMap[ptype] + } else { + return nil + } +} + +// SetRoleManager sets the current role manager. +func (e *Enforcer) SetRoleManager(rm rbac.RoleManager) { + e.invalidateMatcherMap() + e.rmMap["g"] = rm +} + +// SetNamedRoleManager sets the role manager for the named policy. 
+func (e *Enforcer) SetNamedRoleManager(ptype string, rm rbac.RoleManager) { + e.invalidateMatcherMap() + e.rmMap[ptype] = rm +} + +// SetEffector sets the current effector. +func (e *Enforcer) SetEffector(eft effector.Effector) { + e.eft = eft +} + +// ClearPolicy clears all policy. +func (e *Enforcer) ClearPolicy() { + e.invalidateMatcherMap() + + if e.dispatcher != nil && e.autoNotifyDispatcher { + _ = e.dispatcher.ClearPolicy() + return + } + e.model.ClearPolicy() +} + +// LoadPolicy reloads the policy from file/database. +func (e *Enforcer) LoadPolicy() error { + newModel, err := e.loadPolicyFromAdapter(e.model) + if err != nil { + return err + } + err = e.applyModifiedModel(newModel) + if err != nil { + return err + } + return nil +} + +func (e *Enforcer) loadPolicyFromAdapter(baseModel model.Model) (model.Model, error) { + newModel := baseModel.Copy() + newModel.ClearPolicy() + + if err := e.adapter.LoadPolicy(newModel); err != nil && err.Error() != "invalid file path, file path cannot be empty" { + return nil, err + } + + if err := newModel.SortPoliciesBySubjectHierarchy(); err != nil { + return nil, err + } + + if err := newModel.SortPoliciesByPriority(); err != nil { + return nil, err + } + + return newModel, nil +} + +func (e *Enforcer) applyModifiedModel(newModel model.Model) error { + var err error + needToRebuild := false + defer func() { + if err != nil { + if e.autoBuildRoleLinks && needToRebuild { + _ = e.BuildRoleLinks() + } + } + }() + + if e.autoBuildRoleLinks { + needToRebuild = true + + if err := e.rebuildRoleLinks(newModel); err != nil { + return err + } + + if err := e.rebuildConditionalRoleLinks(newModel); err != nil { + return err + } + } + + e.model = newModel + e.invalidateMatcherMap() + return nil +} + +func (e *Enforcer) rebuildRoleLinks(newModel model.Model) error { + if len(e.rmMap) != 0 { + for _, rm := range e.rmMap { + err := rm.Clear() + if err != nil { + return err + } + } + + err := newModel.BuildRoleLinks(e.rmMap) + if err != 
nil { + return err + } + } + + return nil +} + +func (e *Enforcer) rebuildConditionalRoleLinks(newModel model.Model) error { + if len(e.condRmMap) != 0 { + for _, crm := range e.condRmMap { + err := crm.Clear() + if err != nil { + return err + } + } + + err := newModel.BuildConditionalRoleLinks(e.condRmMap) + if err != nil { + return err + } + } + return nil +} + +func (e *Enforcer) loadFilteredPolicy(filter interface{}) error { + e.invalidateMatcherMap() + + var filteredAdapter persist.FilteredAdapter + + // Attempt to cast the Adapter as a FilteredAdapter + switch adapter := e.adapter.(type) { + case persist.FilteredAdapter: + filteredAdapter = adapter + default: + return errors.New("filtered policies are not supported by this adapter") + } + if err := filteredAdapter.LoadFilteredPolicy(e.model, filter); err != nil && err.Error() != "invalid file path, file path cannot be empty" { + return err + } + + if err := e.model.SortPoliciesBySubjectHierarchy(); err != nil { + return err + } + + if err := e.model.SortPoliciesByPriority(); err != nil { + return err + } + + e.initRmMap() + e.model.PrintPolicy() + if e.autoBuildRoleLinks { + err := e.BuildRoleLinks() + if err != nil { + return err + } + } + return nil +} + +// LoadFilteredPolicy reloads a filtered policy from file/database. +func (e *Enforcer) LoadFilteredPolicy(filter interface{}) error { + e.model.ClearPolicy() + + return e.loadFilteredPolicy(filter) +} + +// LoadIncrementalFilteredPolicy append a filtered policy from file/database. +func (e *Enforcer) LoadIncrementalFilteredPolicy(filter interface{}) error { + return e.loadFilteredPolicy(filter) +} + +// IsFiltered returns true if the loaded policy has been filtered. +func (e *Enforcer) IsFiltered() bool { + filteredAdapter, ok := e.adapter.(persist.FilteredAdapter) + if !ok { + return false + } + return filteredAdapter.IsFiltered() +} + +// SavePolicy saves the current policy (usually after changed with Casbin API) back to file/database. 
+func (e *Enforcer) SavePolicy() error { + if e.IsFiltered() { + return errors.New("cannot save a filtered policy") + } + if err := e.adapter.SavePolicy(e.model); err != nil { + return err + } + if e.watcher != nil { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForSavePolicy(e.model) + } else { + err = e.watcher.Update() + } + return err + } + return nil +} + +func (e *Enforcer) initRmMap() { + for ptype, assertion := range e.model["g"] { + if rm, ok := e.rmMap[ptype]; ok { + _ = rm.Clear() + continue + } + if len(assertion.Tokens) <= 2 && len(assertion.ParamsTokens) == 0 { + assertion.RM = defaultrolemanager.NewRoleManagerImpl(10) + e.rmMap[ptype] = assertion.RM + } + if len(assertion.Tokens) <= 2 && len(assertion.ParamsTokens) != 0 { + assertion.CondRM = defaultrolemanager.NewConditionalRoleManager(10) + e.condRmMap[ptype] = assertion.CondRM + } + if len(assertion.Tokens) > 2 { + if len(assertion.ParamsTokens) == 0 { + assertion.RM = defaultrolemanager.NewRoleManager(10) + e.rmMap[ptype] = assertion.RM + } else { + assertion.CondRM = defaultrolemanager.NewConditionalDomainManager(10) + e.condRmMap[ptype] = assertion.CondRM + } + matchFun := "keyMatch(r_dom, p_dom)" + if strings.Contains(e.model["m"]["m"].Value, matchFun) { + e.AddNamedDomainMatchingFunc(ptype, "g", util.KeyMatch) + } + } + } +} + +// EnableEnforce changes the enforcing state of Casbin, when Casbin is disabled, all access will be allowed by the Enforce() function. +func (e *Enforcer) EnableEnforce(enable bool) { + e.enabled = enable +} + +// EnableLog changes whether Casbin will log messages to the Logger. +func (e *Enforcer) EnableLog(enable bool) { + e.logger.EnableLog(enable) +} + +// IsLogEnabled returns the current logger's enabled status. +func (e *Enforcer) IsLogEnabled() bool { + return e.logger.IsEnabled() +} + +// EnableAutoNotifyWatcher controls whether to save a policy rule automatically notify the Watcher when it is added or removed. 
+func (e *Enforcer) EnableAutoNotifyWatcher(enable bool) { + e.autoNotifyWatcher = enable +} + +// EnableAutoNotifyDispatcher controls whether to save a policy rule automatically notify the Dispatcher when it is added or removed. +func (e *Enforcer) EnableAutoNotifyDispatcher(enable bool) { + e.autoNotifyDispatcher = enable +} + +// EnableAutoSave controls whether to save a policy rule automatically to the adapter when it is added or removed. +func (e *Enforcer) EnableAutoSave(autoSave bool) { + e.autoSave = autoSave +} + +// EnableAutoBuildRoleLinks controls whether to rebuild the role inheritance relations when a role is added or deleted. +func (e *Enforcer) EnableAutoBuildRoleLinks(autoBuildRoleLinks bool) { + e.autoBuildRoleLinks = autoBuildRoleLinks +} + +// EnableAcceptJsonRequest controls whether to accept json as a request parameter. +func (e *Enforcer) EnableAcceptJsonRequest(acceptJsonRequest bool) { + e.acceptJsonRequest = acceptJsonRequest +} + +// BuildRoleLinks manually rebuild the role inheritance relations. +func (e *Enforcer) BuildRoleLinks() error { + if e.rmMap == nil { + return errors.New("rmMap is nil") + } + for _, rm := range e.rmMap { + err := rm.Clear() + if err != nil { + return err + } + } + + return e.model.BuildRoleLinks(e.rmMap) +} + +// BuildIncrementalRoleLinks provides incremental build the role inheritance relations. +func (e *Enforcer) BuildIncrementalRoleLinks(op model.PolicyOp, ptype string, rules [][]string) error { + e.invalidateMatcherMap() + return e.model.BuildIncrementalRoleLinks(e.rmMap, op, "g", ptype, rules) +} + +// BuildIncrementalConditionalRoleLinks provides incremental build the role inheritance relations with conditions. 
+func (e *Enforcer) BuildIncrementalConditionalRoleLinks(op model.PolicyOp, ptype string, rules [][]string) error { + e.invalidateMatcherMap() + return e.model.BuildIncrementalConditionalRoleLinks(e.condRmMap, op, "g", ptype, rules) +} + +// NewEnforceContext Create a default structure based on the suffix. +func NewEnforceContext(suffix string) EnforceContext { + return EnforceContext{ + RType: "r" + suffix, + PType: "p" + suffix, + EType: "e" + suffix, + MType: "m" + suffix, + } +} + +func (e *Enforcer) invalidateMatcherMap() { + e.matcherMap = sync.Map{} +} + +// enforce use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". +func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interface{}) (ok bool, err error) { //nolint:funlen,cyclop,gocyclo // TODO: reduce function complexity + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic: %v\n%s", r, debug.Stack()) + } + }() + + if !e.enabled { + return true, nil + } + + functions := e.fm.GetFunctions() + if _, ok := e.model["g"]; ok { + for key, ast := range e.model["g"] { + // g must be a normal role definition (ast.RM != nil) + // or a conditional role definition (ast.CondRM != nil) + // ast.RM and ast.CondRM shouldn't be nil at the same time + if ast.RM != nil { + functions[key] = util.GenerateGFunction(ast.RM) + } + if ast.CondRM != nil { + functions[key] = util.GenerateConditionalGFunction(ast.CondRM) + } + } + } + + var ( + rType = "r" + pType = "p" + eType = "e" + mType = "m" + ) + if len(rvals) != 0 { + switch rvals[0].(type) { + case EnforceContext: + enforceContext := rvals[0].(EnforceContext) + rType = enforceContext.RType + pType = enforceContext.PType + eType = enforceContext.EType + mType = enforceContext.MType + rvals = rvals[1:] + default: + break + } + } + + var expString string + if matcher == "" { + expString 
= e.model["m"][mType].Value + } else { + expString = util.RemoveComments(util.EscapeAssertion(matcher)) + } + + rTokens := make(map[string]int, len(e.model["r"][rType].Tokens)) + for i, token := range e.model["r"][rType].Tokens { + rTokens[token] = i + } + pTokens := make(map[string]int, len(e.model["p"][pType].Tokens)) + for i, token := range e.model["p"][pType].Tokens { + pTokens[token] = i + } + + if e.acceptJsonRequest { + // try to parse all request values from json to map[string]interface{} + // skip if there is an error + for i, rval := range rvals { + switch rval := rval.(type) { + case string: + var mapValue map[string]interface{} + mapValue, err = util.JsonToMap(rval) + if err == nil { + rvals[i] = mapValue + } + } + } + } + + parameters := enforceParameters{ + rTokens: rTokens, + rVals: rvals, + + pTokens: pTokens, + } + + hasEval := util.HasEval(expString) + if hasEval { + functions["eval"] = generateEvalFunction(functions, ¶meters) + } + var expression *govaluate.EvaluableExpression + expression, err = e.getAndStoreMatcherExpression(hasEval, expString, functions) + if err != nil { + return false, err + } + + if len(e.model["r"][rType].Tokens) != len(rvals) { + return false, fmt.Errorf( + "invalid request size: expected %d, got %d, rvals: %v", + len(e.model["r"][rType].Tokens), + len(rvals), + rvals) + } + + var policyEffects []effector.Effect + var matcherResults []float64 + + var effect effector.Effect + var explainIndex int + + if policyLen := len(e.model["p"][pType].Policy); policyLen != 0 && strings.Contains(expString, pType+"_") { //nolint:nestif // TODO: reduce function complexity + policyEffects = make([]effector.Effect, policyLen) + matcherResults = make([]float64, policyLen) + + for policyIndex, pvals := range e.model["p"][pType].Policy { + // log.LogPrint("Policy Rule: ", pvals) + if len(e.model["p"][pType].Tokens) != len(pvals) { + return false, fmt.Errorf( + "invalid policy size: expected %d, got %d, pvals: %v", + 
len(e.model["p"][pType].Tokens), + len(pvals), + pvals) + } + + parameters.pVals = pvals + + result, err := expression.Eval(parameters) + // log.LogPrint("Result: ", result) + + if err != nil { + return false, err + } + + // set to no-match at first + matcherResults[policyIndex] = 0 + switch result := result.(type) { + case bool: + if result { + matcherResults[policyIndex] = 1 + } + case float64: + if result != 0 { + matcherResults[policyIndex] = 1 + } + default: + return false, errors.New("matcher result should be bool, int or float") + } + + if j, ok := parameters.pTokens[pType+"_eft"]; ok { + eft := parameters.pVals[j] + if eft == "allow" { + policyEffects[policyIndex] = effector.Allow + } else if eft == "deny" { + policyEffects[policyIndex] = effector.Deny + } else { + policyEffects[policyIndex] = effector.Indeterminate + } + } else { + policyEffects[policyIndex] = effector.Allow + } + + // if e.model["e"]["e"].Value == "priority(p_eft) || deny" { + // break + // } + + effect, explainIndex, err = e.eft.MergeEffects(e.model["e"][eType].Value, policyEffects, matcherResults, policyIndex, policyLen) + if err != nil { + return false, err + } + if effect != effector.Indeterminate { + break + } + } + } else { + if hasEval && len(e.model["p"][pType].Policy) == 0 { + return false, errors.New("please make sure rule exists in policy when using eval() in matcher") + } + + policyEffects = make([]effector.Effect, 1) + matcherResults = make([]float64, 1) + matcherResults[0] = 1 + + parameters.pVals = make([]string, len(parameters.pTokens)) + + result, err := expression.Eval(parameters) + + if err != nil { + return false, err + } + + if result.(bool) { + policyEffects[0] = effector.Allow + } else { + policyEffects[0] = effector.Indeterminate + } + + effect, explainIndex, err = e.eft.MergeEffects(e.model["e"][eType].Value, policyEffects, matcherResults, 0, 1) + if err != nil { + return false, err + } + } + + var logExplains [][]string + + if explains != nil { + if 
len(*explains) > 0 { + logExplains = append(logExplains, *explains) + } + + if explainIndex != -1 && len(e.model["p"][pType].Policy) > explainIndex { + *explains = e.model["p"][pType].Policy[explainIndex] + logExplains = append(logExplains, *explains) + } + } + + // effect -> result + result := false + if effect == effector.Allow { + result = true + } + e.logger.LogEnforce(expString, rvals, result, logExplains) + + return result, nil +} + +func (e *Enforcer) getAndStoreMatcherExpression(hasEval bool, expString string, functions map[string]govaluate.ExpressionFunction) (*govaluate.EvaluableExpression, error) { + var expression *govaluate.EvaluableExpression + var err error + var cachedExpression, isPresent = e.matcherMap.Load(expString) + + if !hasEval && isPresent { + expression = cachedExpression.(*govaluate.EvaluableExpression) + } else { + expression, err = govaluate.NewEvaluableExpressionWithFunctions(expString, functions) + if err != nil { + return nil, err + } + e.matcherMap.Store(expString, expression) + } + return expression, nil +} + +// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). +func (e *Enforcer) Enforce(rvals ...interface{}) (bool, error) { + return e.enforce("", nil, rvals...) +} + +// EnforceWithMatcher use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". +func (e *Enforcer) EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) { + return e.enforce(matcher, nil, rvals...) +} + +// EnforceEx explain enforcement by informing matched rules. +func (e *Enforcer) EnforceEx(rvals ...interface{}) (bool, []string, error) { + explain := []string{} + result, err := e.enforce("", &explain, rvals...) 
+ return result, explain, err +} + +// EnforceExWithMatcher use a custom matcher and explain enforcement by informing matched rules. +func (e *Enforcer) EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) { + explain := []string{} + result, err := e.enforce(matcher, &explain, rvals...) + return result, explain, err +} + +// BatchEnforce enforce in batches. +func (e *Enforcer) BatchEnforce(requests [][]interface{}) ([]bool, error) { + var results []bool + for _, request := range requests { + result, err := e.enforce("", nil, request...) + if err != nil { + return results, err + } + results = append(results, result) + } + return results, nil +} + +// BatchEnforceWithMatcher enforce with matcher in batches. +func (e *Enforcer) BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) { + var results []bool + for _, request := range requests { + result, err := e.enforce(matcher, nil, request...) + if err != nil { + return results, err + } + results = append(results, result) + } + return results, nil +} + +// AddNamedMatchingFunc add MatchingFunc by ptype RoleManager. +func (e *Enforcer) AddNamedMatchingFunc(ptype, name string, fn rbac.MatchingFunc) bool { + if rm, ok := e.rmMap[ptype]; ok { + rm.AddMatchingFunc(name, fn) + return true + } + return false +} + +// AddNamedDomainMatchingFunc add MatchingFunc by ptype to RoleManager. +func (e *Enforcer) AddNamedDomainMatchingFunc(ptype, name string, fn rbac.MatchingFunc) bool { + if rm, ok := e.rmMap[ptype]; ok { + rm.AddDomainMatchingFunc(name, fn) + return true + } + return false +} + +// AddNamedLinkConditionFunc Add condition function fn for Link userName->roleName, +// when fn returns true, Link is valid, otherwise invalid. 
+func (e *Enforcer) AddNamedLinkConditionFunc(ptype, user, role string, fn rbac.LinkConditionFunc) bool { + if rm, ok := e.condRmMap[ptype]; ok { + rm.AddLinkConditionFunc(user, role, fn) + return true + } + return false +} + +// AddNamedDomainLinkConditionFunc Add condition function fn for Link userName-> {roleName, domain}, +// when fn returns true, Link is valid, otherwise invalid. +func (e *Enforcer) AddNamedDomainLinkConditionFunc(ptype, user, role string, domain string, fn rbac.LinkConditionFunc) bool { + if rm, ok := e.condRmMap[ptype]; ok { + rm.AddDomainLinkConditionFunc(user, role, domain, fn) + return true + } + return false +} + +// SetNamedLinkConditionFuncParams Sets the parameters of the condition function fn for Link userName->roleName. +func (e *Enforcer) SetNamedLinkConditionFuncParams(ptype, user, role string, params ...string) bool { + if rm, ok := e.condRmMap[ptype]; ok { + rm.SetLinkConditionFuncParams(user, role, params...) + return true + } + return false +} + +// SetNamedDomainLinkConditionFuncParams Sets the parameters of the condition function fn +// for Link userName->{roleName, domain}. +func (e *Enforcer) SetNamedDomainLinkConditionFuncParams(ptype, user, role, domain string, params ...string) bool { + if rm, ok := e.condRmMap[ptype]; ok { + rm.SetDomainLinkConditionFuncParams(user, role, domain, params...) + return true + } + return false +} + +// assumes bounds have already been checked. +type enforceParameters struct { + rTokens map[string]int + rVals []interface{} + + pTokens map[string]int + pVals []string +} + +// implements govaluate.Parameters. 
+func (p enforceParameters) Get(name string) (interface{}, error) { + if name == "" { + return nil, nil + } + + switch name[0] { + case 'p': + i, ok := p.pTokens[name] + if !ok { + return nil, errors.New("No parameter '" + name + "' found.") + } + return p.pVals[i], nil + case 'r': + i, ok := p.rTokens[name] + if !ok { + return nil, errors.New("No parameter '" + name + "' found.") + } + return p.rVals[i], nil + default: + return nil, errors.New("No parameter '" + name + "' found.") + } +} + +func generateEvalFunction(functions map[string]govaluate.ExpressionFunction, parameters *enforceParameters) govaluate.ExpressionFunction { + return func(args ...interface{}) (interface{}, error) { + if len(args) != 1 { + return nil, fmt.Errorf("function eval(subrule string) expected %d arguments, but got %d", 1, len(args)) + } + + expression, ok := args[0].(string) + if !ok { + return nil, errors.New("argument of eval(subrule string) must be a string") + } + expression = util.EscapeAssertion(expression) + expr, err := govaluate.NewEvaluableExpressionWithFunctions(expression, functions) + if err != nil { + return nil, fmt.Errorf("error while parsing eval parameter: %s, %s", expression, err.Error()) + } + return expr.Eval(parameters) + } +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_cached.go b/vendor/github.com/casbin/casbin/v2/enforcer_cached.go new file mode 100644 index 000000000..b89bad78d --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_cached.go @@ -0,0 +1,185 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/casbin/casbin/v2/persist/cache" +) + +// CachedEnforcer wraps Enforcer and provides decision cache. +type CachedEnforcer struct { + *Enforcer + expireTime time.Duration + cache cache.Cache + enableCache int32 + locker *sync.RWMutex +} + +type CacheableParam interface { + GetCacheKey() string +} + +// NewCachedEnforcer creates a cached enforcer via file or DB. +func NewCachedEnforcer(params ...interface{}) (*CachedEnforcer, error) { + e := &CachedEnforcer{} + var err error + e.Enforcer, err = NewEnforcer(params...) + if err != nil { + return nil, err + } + + e.enableCache = 1 + e.cache, _ = cache.NewDefaultCache() + e.locker = new(sync.RWMutex) + return e, nil +} + +// EnableCache determines whether to enable cache on Enforce(). When enableCache is enabled, cached result (true | false) will be returned for previous decisions. +func (e *CachedEnforcer) EnableCache(enableCache bool) { + var enabled int32 + if enableCache { + enabled = 1 + } + atomic.StoreInt32(&e.enableCache, enabled) +} + +// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). +// if rvals is not string , ignore the cache. +func (e *CachedEnforcer) Enforce(rvals ...interface{}) (bool, error) { + if atomic.LoadInt32(&e.enableCache) == 0 { + return e.Enforcer.Enforce(rvals...) + } + + key, ok := e.getKey(rvals...) + if !ok { + return e.Enforcer.Enforce(rvals...) 
+ } + + if res, err := e.getCachedResult(key); err == nil { + return res, nil + } else if err != cache.ErrNoSuchKey { + return res, err + } + + res, err := e.Enforcer.Enforce(rvals...) + if err != nil { + return false, err + } + + err = e.setCachedResult(key, res, e.expireTime) + return res, err +} + +func (e *CachedEnforcer) LoadPolicy() error { + if atomic.LoadInt32(&e.enableCache) != 0 { + if err := e.cache.Clear(); err != nil { + return err + } + } + return e.Enforcer.LoadPolicy() +} + +func (e *CachedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { + if atomic.LoadInt32(&e.enableCache) != 0 { + key, ok := e.getKey(params...) + if ok { + if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { + return false, err + } + } + } + return e.Enforcer.RemovePolicy(params...) +} + +func (e *CachedEnforcer) RemovePolicies(rules [][]string) (bool, error) { + if len(rules) != 0 { + if atomic.LoadInt32(&e.enableCache) != 0 { + irule := make([]interface{}, len(rules[0])) + for _, rule := range rules { + for i, param := range rule { + irule[i] = param + } + key, _ := e.getKey(irule...) + if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { + return false, err + } + } + } + } + return e.Enforcer.RemovePolicies(rules) +} + +func (e *CachedEnforcer) getCachedResult(key string) (res bool, err error) { + e.locker.Lock() + defer e.locker.Unlock() + return e.cache.Get(key) +} + +func (e *CachedEnforcer) SetExpireTime(expireTime time.Duration) { + e.expireTime = expireTime +} + +func (e *CachedEnforcer) SetCache(c cache.Cache) { + e.cache = c +} + +func (e *CachedEnforcer) setCachedResult(key string, res bool, extra ...interface{}) error { + e.locker.Lock() + defer e.locker.Unlock() + return e.cache.Set(key, res, extra...) +} + +func (e *CachedEnforcer) getKey(params ...interface{}) (string, bool) { + return GetCacheKey(params...) +} + +// InvalidateCache deletes all the existing cached decisions. 
+func (e *CachedEnforcer) InvalidateCache() error { + e.locker.Lock() + defer e.locker.Unlock() + return e.cache.Clear() +} + +func GetCacheKey(params ...interface{}) (string, bool) { + key := strings.Builder{} + for _, param := range params { + switch typedParam := param.(type) { + case string: + key.WriteString(typedParam) + case CacheableParam: + key.WriteString(typedParam.GetCacheKey()) + default: + return "", false + } + key.WriteString("$$") + } + return key.String(), true +} + +// ClearPolicy clears all policy. +func (e *CachedEnforcer) ClearPolicy() { + if atomic.LoadInt32(&e.enableCache) != 0 { + if err := e.cache.Clear(); err != nil { + e.logger.LogError(err, "clear cache failed") + return + } + } + e.Enforcer.ClearPolicy() +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go b/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go new file mode 100644 index 000000000..0032460fc --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_cached_synced.go @@ -0,0 +1,180 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/casbin/casbin/v2/persist/cache" +) + +// SyncedCachedEnforcer wraps Enforcer and provides decision sync cache. 
+type SyncedCachedEnforcer struct { + *SyncedEnforcer + expireTime time.Duration + cache cache.Cache + enableCache int32 + locker *sync.RWMutex +} + +// NewSyncedCachedEnforcer creates a sync cached enforcer via file or DB. +func NewSyncedCachedEnforcer(params ...interface{}) (*SyncedCachedEnforcer, error) { + e := &SyncedCachedEnforcer{} + var err error + e.SyncedEnforcer, err = NewSyncedEnforcer(params...) + if err != nil { + return nil, err + } + + e.enableCache = 1 + e.cache, _ = cache.NewSyncCache() + e.locker = new(sync.RWMutex) + return e, nil +} + +// EnableCache determines whether to enable cache on Enforce(). When enableCache is enabled, cached result (true | false) will be returned for previous decisions. +func (e *SyncedCachedEnforcer) EnableCache(enableCache bool) { + var enabled int32 + if enableCache { + enabled = 1 + } + atomic.StoreInt32(&e.enableCache, enabled) +} + +// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). +// if rvals is not string , ignore the cache. +func (e *SyncedCachedEnforcer) Enforce(rvals ...interface{}) (bool, error) { + if atomic.LoadInt32(&e.enableCache) == 0 { + return e.SyncedEnforcer.Enforce(rvals...) + } + + key, ok := e.getKey(rvals...) + if !ok { + return e.SyncedEnforcer.Enforce(rvals...) + } + + if res, err := e.getCachedResult(key); err == nil { + return res, nil + } else if err != cache.ErrNoSuchKey { + return res, err + } + + res, err := e.SyncedEnforcer.Enforce(rvals...) 
+ if err != nil { + return false, err + } + + err = e.setCachedResult(key, res, e.expireTime) + return res, err +} + +func (e *SyncedCachedEnforcer) LoadPolicy() error { + if atomic.LoadInt32(&e.enableCache) != 0 { + if err := e.cache.Clear(); err != nil { + return err + } + } + return e.SyncedEnforcer.LoadPolicy() +} + +func (e *SyncedCachedEnforcer) AddPolicy(params ...interface{}) (bool, error) { + if ok, err := e.checkOneAndRemoveCache(params...); !ok { + return ok, err + } + return e.SyncedEnforcer.AddPolicy(params...) +} + +func (e *SyncedCachedEnforcer) AddPolicies(rules [][]string) (bool, error) { + if ok, err := e.checkManyAndRemoveCache(rules); !ok { + return ok, err + } + return e.SyncedEnforcer.AddPolicies(rules) +} + +func (e *SyncedCachedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { + if ok, err := e.checkOneAndRemoveCache(params...); !ok { + return ok, err + } + return e.SyncedEnforcer.RemovePolicy(params...) +} + +func (e *SyncedCachedEnforcer) RemovePolicies(rules [][]string) (bool, error) { + if ok, err := e.checkManyAndRemoveCache(rules); !ok { + return ok, err + } + return e.SyncedEnforcer.RemovePolicies(rules) +} + +func (e *SyncedCachedEnforcer) getCachedResult(key string) (res bool, err error) { + return e.cache.Get(key) +} + +func (e *SyncedCachedEnforcer) SetExpireTime(expireTime time.Duration) { + e.locker.Lock() + defer e.locker.Unlock() + e.expireTime = expireTime +} + +// SetCache need to be sync cache. +func (e *SyncedCachedEnforcer) SetCache(c cache.Cache) { + e.locker.Lock() + defer e.locker.Unlock() + e.cache = c +} + +func (e *SyncedCachedEnforcer) setCachedResult(key string, res bool, extra ...interface{}) error { + return e.cache.Set(key, res, extra...) +} + +func (e *SyncedCachedEnforcer) getKey(params ...interface{}) (string, bool) { + return GetCacheKey(params...) +} + +// InvalidateCache deletes all the existing cached decisions. 
+func (e *SyncedCachedEnforcer) InvalidateCache() error { + return e.cache.Clear() +} + +func (e *SyncedCachedEnforcer) checkOneAndRemoveCache(params ...interface{}) (bool, error) { + if atomic.LoadInt32(&e.enableCache) != 0 { + key, ok := e.getKey(params...) + if ok { + if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { + return false, err + } + } + } + return true, nil +} + +func (e *SyncedCachedEnforcer) checkManyAndRemoveCache(rules [][]string) (bool, error) { + if len(rules) != 0 { + if atomic.LoadInt32(&e.enableCache) != 0 { + irule := make([]interface{}, len(rules[0])) + for _, rule := range rules { + for i, param := range rule { + irule[i] = param + } + key, _ := e.getKey(irule...) + if err := e.cache.Delete(key); err != nil && err != cache.ErrNoSuchKey { + return false, err + } + } + } + } + return true, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go b/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go new file mode 100644 index 000000000..09f667237 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_distributed.go @@ -0,0 +1,239 @@ +package casbin + +import ( + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" +) + +// DistributedEnforcer wraps SyncedEnforcer for dispatcher. +type DistributedEnforcer struct { + *SyncedEnforcer +} + +func NewDistributedEnforcer(params ...interface{}) (*DistributedEnforcer, error) { + e := &DistributedEnforcer{} + var err error + e.SyncedEnforcer, err = NewSyncedEnforcer(params...) + if err != nil { + return nil, err + } + + return e, nil +} + +// SetDispatcher sets the current dispatcher. +func (d *DistributedEnforcer) SetDispatcher(dispatcher persist.Dispatcher) { + d.dispatcher = dispatcher +} + +// AddPoliciesSelf provides a method for dispatcher to add authorization rules to the current policy. +// The function returns the rules affected and error. 
+func (d *DistributedEnforcer) AddPoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + var noExistsPolicy [][]string + for _, rule := range rules { + var hasPolicy bool + hasPolicy, err = d.model.HasPolicy(sec, ptype, rule) + if err != nil { + return nil, err + } + if !hasPolicy { + noExistsPolicy = append(noExistsPolicy, rule) + } + } + + if err = d.adapter.(persist.BatchAdapter).AddPolicies(sec, ptype, noExistsPolicy); err != nil && err.Error() != notImplemented { + return nil, err + } + } + + affected, err = d.model.AddPoliciesWithAffected(sec, ptype, rules) + if err != nil { + return affected, err + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, affected) + if err != nil { + return affected, err + } + } + + return affected, nil +} + +// RemovePoliciesSelf provides a method for dispatcher to remove a set of rules from current policy. +// The function returns the rules affected and error. +func (d *DistributedEnforcer) RemovePoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + if err = d.adapter.(persist.BatchAdapter).RemovePolicies(sec, ptype, rules); err != nil { + if err.Error() != notImplemented { + return nil, err + } + } + } + + affected, err = d.model.RemovePoliciesWithAffected(sec, ptype, rules) + if err != nil { + return affected, err + } + + if sec == "g" { + err = d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, affected) + if err != nil { + return affected, err + } + } + + return affected, err +} + +// RemoveFilteredPolicySelf provides a method for dispatcher to remove an authorization rule from the current policy, field filters can be specified. +// The function returns the rules affected and error. 
+func (d *DistributedEnforcer) RemoveFilteredPolicySelf(shouldPersist func() bool, sec string, ptype string, fieldIndex int, fieldValues ...string) (affected [][]string, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + if err = d.adapter.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...); err != nil { + if err.Error() != notImplemented { + return nil, err + } + } + } + + _, affected, err = d.model.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) + if err != nil { + return affected, err + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, affected) + if err != nil { + return affected, err + } + } + + return affected, nil +} + +// ClearPolicySelf provides a method for dispatcher to clear all rules from the current policy. +func (d *DistributedEnforcer) ClearPolicySelf(shouldPersist func() bool) error { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + err := d.adapter.SavePolicy(nil) + if err != nil { + return err + } + } + + d.model.ClearPolicy() + + return nil +} + +// UpdatePolicySelf provides a method for dispatcher to update an authorization rule from the current policy. 
+func (d *DistributedEnforcer) UpdatePolicySelf(shouldPersist func() bool, sec string, ptype string, oldRule, newRule []string) (affected bool, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + err = d.adapter.(persist.UpdatableAdapter).UpdatePolicy(sec, ptype, oldRule, newRule) + if err != nil { + return false, err + } + } + + ruleUpdated, err := d.model.UpdatePolicy(sec, ptype, oldRule, newRule) + if !ruleUpdated || err != nil { + return ruleUpdated, err + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{oldRule}) // remove the old rule + if err != nil { + return ruleUpdated, err + } + err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{newRule}) // add the new rule + if err != nil { + return ruleUpdated, err + } + } + + return ruleUpdated, nil +} + +// UpdatePoliciesSelf provides a method for dispatcher to update a set of authorization rules from the current policy. +func (d *DistributedEnforcer) UpdatePoliciesSelf(shouldPersist func() bool, sec string, ptype string, oldRules, newRules [][]string) (affected bool, err error) { + d.m.Lock() + defer d.m.Unlock() + if shouldPersist != nil && shouldPersist() { + err = d.adapter.(persist.UpdatableAdapter).UpdatePolicies(sec, ptype, oldRules, newRules) + if err != nil { + return false, err + } + } + + ruleUpdated, err := d.model.UpdatePolicies(sec, ptype, oldRules, newRules) + if !ruleUpdated || err != nil { + return ruleUpdated, err + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rule + if err != nil { + return ruleUpdated, err + } + err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rule + if err != nil { + return ruleUpdated, err + } + } + + return ruleUpdated, nil +} + +// UpdateFilteredPoliciesSelf provides a method for dispatcher to update a set of authorization rules from the current policy. 
+func (d *DistributedEnforcer) UpdateFilteredPoliciesSelf(shouldPersist func() bool, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + d.m.Lock() + defer d.m.Unlock() + var ( + oldRules [][]string + err error + ) + if shouldPersist != nil && shouldPersist() { + oldRules, err = d.adapter.(persist.UpdatableAdapter).UpdateFilteredPolicies(sec, ptype, newRules, fieldIndex, fieldValues...) + if err != nil { + return false, err + } + } + + ruleChanged, err := d.model.RemovePolicies(sec, ptype, oldRules) + if err != nil { + return ruleChanged, err + } + err = d.model.AddPolicies(sec, ptype, newRules) + if err != nil { + return ruleChanged, err + } + ruleChanged = ruleChanged && len(newRules) != 0 + if !ruleChanged { + return ruleChanged, nil + } + + if sec == "g" { + err := d.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rule + if err != nil { + return ruleChanged, err + } + err = d.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rule + if err != nil { + return ruleChanged, err + } + } + + return true, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_interface.go b/vendor/github.com/casbin/casbin/v2/enforcer_interface.go new file mode 100644 index 000000000..d22dcf10b --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_interface.go @@ -0,0 +1,177 @@ +// Copyright 2019 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "github.com/casbin/casbin/v2/effector" + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" + "github.com/casbin/casbin/v2/rbac" + "github.com/casbin/govaluate" +) + +var _ IEnforcer = &Enforcer{} +var _ IEnforcer = &SyncedEnforcer{} +var _ IEnforcer = &CachedEnforcer{} + +// IEnforcer is the API interface of Enforcer. +type IEnforcer interface { + /* Enforcer API */ + InitWithFile(modelPath string, policyPath string) error + InitWithAdapter(modelPath string, adapter persist.Adapter) error + InitWithModelAndAdapter(m model.Model, adapter persist.Adapter) error + LoadModel() error + GetModel() model.Model + SetModel(m model.Model) + GetAdapter() persist.Adapter + SetAdapter(adapter persist.Adapter) + SetWatcher(watcher persist.Watcher) error + GetRoleManager() rbac.RoleManager + SetRoleManager(rm rbac.RoleManager) + SetEffector(eft effector.Effector) + ClearPolicy() + LoadPolicy() error + LoadFilteredPolicy(filter interface{}) error + LoadIncrementalFilteredPolicy(filter interface{}) error + IsFiltered() bool + SavePolicy() error + EnableEnforce(enable bool) + EnableLog(enable bool) + EnableAutoNotifyWatcher(enable bool) + EnableAutoSave(autoSave bool) + EnableAutoBuildRoleLinks(autoBuildRoleLinks bool) + BuildRoleLinks() error + Enforce(rvals ...interface{}) (bool, error) + EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) + EnforceEx(rvals ...interface{}) (bool, []string, error) + EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) + BatchEnforce(requests [][]interface{}) ([]bool, error) + BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) + + /* RBAC API */ + GetRolesForUser(name string, domain ...string) ([]string, error) + GetUsersForRole(name string, domain ...string) ([]string, error) + HasRoleForUser(name string, 
role string, domain ...string) (bool, error) + AddRoleForUser(user string, role string, domain ...string) (bool, error) + AddPermissionForUser(user string, permission ...string) (bool, error) + AddPermissionsForUser(user string, permissions ...[]string) (bool, error) + DeletePermissionForUser(user string, permission ...string) (bool, error) + DeletePermissionsForUser(user string) (bool, error) + GetPermissionsForUser(user string, domain ...string) ([][]string, error) + HasPermissionForUser(user string, permission ...string) (bool, error) + GetImplicitRolesForUser(name string, domain ...string) ([]string, error) + GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) + GetImplicitUsersForPermission(permission ...string) ([]string, error) + DeleteRoleForUser(user string, role string, domain ...string) (bool, error) + DeleteRolesForUser(user string, domain ...string) (bool, error) + DeleteUser(user string) (bool, error) + DeleteRole(role string) (bool, error) + DeletePermission(permission ...string) (bool, error) + + /* RBAC API with domains*/ + GetUsersForRoleInDomain(name string, domain string) []string + GetRolesForUserInDomain(name string, domain string) []string + GetPermissionsForUserInDomain(user string, domain string) [][]string + AddRoleForUserInDomain(user string, role string, domain string) (bool, error) + DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) + GetAllUsersByDomain(domain string) ([]string, error) + DeleteRolesForUserInDomain(user string, domain string) (bool, error) + DeleteAllUsersByDomain(domain string) (bool, error) + DeleteDomains(domains ...string) (bool, error) + GetAllDomains() ([]string, error) + GetAllRolesByDomain(domain string) ([]string, error) + + /* Management API */ + GetAllSubjects() ([]string, error) + GetAllNamedSubjects(ptype string) ([]string, error) + GetAllObjects() ([]string, error) + GetAllNamedObjects(ptype string) ([]string, error) + GetAllActions() ([]string, 
error) + GetAllNamedActions(ptype string) ([]string, error) + GetAllRoles() ([]string, error) + GetAllNamedRoles(ptype string) ([]string, error) + GetPolicy() ([][]string, error) + GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) + GetNamedPolicy(ptype string) ([][]string, error) + GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) + GetGroupingPolicy() ([][]string, error) + GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) + GetNamedGroupingPolicy(ptype string) ([][]string, error) + GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) + HasPolicy(params ...interface{}) (bool, error) + HasNamedPolicy(ptype string, params ...interface{}) (bool, error) + AddPolicy(params ...interface{}) (bool, error) + AddPolicies(rules [][]string) (bool, error) + AddNamedPolicy(ptype string, params ...interface{}) (bool, error) + AddNamedPolicies(ptype string, rules [][]string) (bool, error) + AddPoliciesEx(rules [][]string) (bool, error) + AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) + RemovePolicy(params ...interface{}) (bool, error) + RemovePolicies(rules [][]string) (bool, error) + RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) + RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) + RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) + RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) + HasGroupingPolicy(params ...interface{}) (bool, error) + HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) + AddGroupingPolicy(params ...interface{}) (bool, error) + AddGroupingPolicies(rules [][]string) (bool, error) + AddGroupingPoliciesEx(rules [][]string) (bool, error) + AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) + AddNamedGroupingPolicies(ptype string, rules 
[][]string) (bool, error) + AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) + RemoveGroupingPolicy(params ...interface{}) (bool, error) + RemoveGroupingPolicies(rules [][]string) (bool, error) + RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) + RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) + RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) + RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) + AddFunction(name string, function govaluate.ExpressionFunction) + + UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) + UpdatePolicies(oldPolicies [][]string, newPolicies [][]string) (bool, error) + UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) + + UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) + UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) + UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) + UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) + + /* Management API with autoNotifyWatcher disabled */ + SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) + SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) + SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) + SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) + SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) + SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) + SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) + SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) +} + +var _ IDistributedEnforcer = 
&DistributedEnforcer{} + +// IDistributedEnforcer defines dispatcher enforcer. +type IDistributedEnforcer interface { + IEnforcer + SetDispatcher(dispatcher persist.Dispatcher) + /* Management API for DistributedEnforcer*/ + AddPoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) + RemovePoliciesSelf(shouldPersist func() bool, sec string, ptype string, rules [][]string) (affected [][]string, err error) + RemoveFilteredPolicySelf(shouldPersist func() bool, sec string, ptype string, fieldIndex int, fieldValues ...string) (affected [][]string, err error) + ClearPolicySelf(shouldPersist func() bool) error + UpdatePolicySelf(shouldPersist func() bool, sec string, ptype string, oldRule, newRule []string) (affected bool, err error) + UpdatePoliciesSelf(shouldPersist func() bool, sec string, ptype string, oldRules, newRules [][]string) (affected bool, err error) + UpdateFilteredPoliciesSelf(shouldPersist func() bool, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/enforcer_synced.go b/vendor/github.com/casbin/casbin/v2/enforcer_synced.go new file mode 100644 index 000000000..ae2fc7c4d --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/enforcer_synced.go @@ -0,0 +1,650 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package casbin + +import ( + "sync" + "sync/atomic" + "time" + + "github.com/casbin/govaluate" + + "github.com/casbin/casbin/v2/persist" +) + +// SyncedEnforcer wraps Enforcer and provides synchronized access. +type SyncedEnforcer struct { + *Enforcer + m sync.RWMutex + stopAutoLoad chan struct{} + autoLoadRunning int32 +} + +// NewSyncedEnforcer creates a synchronized enforcer via file or DB. +func NewSyncedEnforcer(params ...interface{}) (*SyncedEnforcer, error) { + e := &SyncedEnforcer{} + var err error + e.Enforcer, err = NewEnforcer(params...) + if err != nil { + return nil, err + } + + e.stopAutoLoad = make(chan struct{}, 1) + e.autoLoadRunning = 0 + return e, nil +} + +// GetLock return the private RWMutex lock. +func (e *SyncedEnforcer) GetLock() *sync.RWMutex { + return &e.m +} + +// IsAutoLoadingRunning check if SyncedEnforcer is auto loading policies. +func (e *SyncedEnforcer) IsAutoLoadingRunning() bool { + return atomic.LoadInt32(&(e.autoLoadRunning)) != 0 +} + +// StartAutoLoadPolicy starts a go routine that will every specified duration call LoadPolicy. +func (e *SyncedEnforcer) StartAutoLoadPolicy(d time.Duration) { + // Don't start another goroutine if there is already one running + if !atomic.CompareAndSwapInt32(&e.autoLoadRunning, 0, 1) { + return + } + + ticker := time.NewTicker(d) + go func() { + defer func() { + ticker.Stop() + atomic.StoreInt32(&(e.autoLoadRunning), int32(0)) + }() + n := 1 + for { + select { + case <-ticker.C: + // error intentionally ignored + _ = e.LoadPolicy() + // Uncomment this line to see when the policy is loaded. + // log.Print("Load policy for time: ", n) + n++ + case <-e.stopAutoLoad: + return + } + } + }() +} + +// StopAutoLoadPolicy causes the go routine to exit. +func (e *SyncedEnforcer) StopAutoLoadPolicy() { + if e.IsAutoLoadingRunning() { + e.stopAutoLoad <- struct{}{} + } +} + +// SetWatcher sets the current watcher. 
+func (e *SyncedEnforcer) SetWatcher(watcher persist.Watcher) error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SetWatcher(watcher) +} + +// LoadModel reloads the model from the model CONF file. +func (e *SyncedEnforcer) LoadModel() error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.LoadModel() +} + +// ClearPolicy clears all policy. +func (e *SyncedEnforcer) ClearPolicy() { + e.m.Lock() + defer e.m.Unlock() + e.Enforcer.ClearPolicy() +} + +// LoadPolicy reloads the policy from file/database. +func (e *SyncedEnforcer) LoadPolicy() error { + e.m.RLock() + newModel, err := e.loadPolicyFromAdapter(e.model) + e.m.RUnlock() + if err != nil { + return err + } + e.m.Lock() + err = e.applyModifiedModel(newModel) + e.m.Unlock() + if err != nil { + return err + } + return nil +} + +// LoadFilteredPolicy reloads a filtered policy from file/database. +func (e *SyncedEnforcer) LoadFilteredPolicy(filter interface{}) error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.LoadFilteredPolicy(filter) +} + +// LoadIncrementalFilteredPolicy reloads a filtered policy from file/database. +func (e *SyncedEnforcer) LoadIncrementalFilteredPolicy(filter interface{}) error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.LoadIncrementalFilteredPolicy(filter) +} + +// SavePolicy saves the current policy (usually after changed with Casbin API) back to file/database. +func (e *SyncedEnforcer) SavePolicy() error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SavePolicy() +} + +// BuildRoleLinks manually rebuild the role inheritance relations. +func (e *SyncedEnforcer) BuildRoleLinks() error { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.BuildRoleLinks() +} + +// Enforce decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (sub, obj, act). +func (e *SyncedEnforcer) Enforce(rvals ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.Enforce(rvals...) 
+} + +// EnforceWithMatcher use a custom matcher to decides whether a "subject" can access a "object" with the operation "action", input parameters are usually: (matcher, sub, obj, act), use model matcher by default when matcher is "". +func (e *SyncedEnforcer) EnforceWithMatcher(matcher string, rvals ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.EnforceWithMatcher(matcher, rvals...) +} + +// EnforceEx explain enforcement by informing matched rules. +func (e *SyncedEnforcer) EnforceEx(rvals ...interface{}) (bool, []string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.EnforceEx(rvals...) +} + +// EnforceExWithMatcher use a custom matcher and explain enforcement by informing matched rules. +func (e *SyncedEnforcer) EnforceExWithMatcher(matcher string, rvals ...interface{}) (bool, []string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.EnforceExWithMatcher(matcher, rvals...) +} + +// BatchEnforce enforce in batches. +func (e *SyncedEnforcer) BatchEnforce(requests [][]interface{}) ([]bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.BatchEnforce(requests) +} + +// BatchEnforceWithMatcher enforce with matcher in batches. +func (e *SyncedEnforcer) BatchEnforceWithMatcher(matcher string, requests [][]interface{}) ([]bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.BatchEnforceWithMatcher(matcher, requests) +} + +// GetAllSubjects gets the list of subjects that show up in the current policy. +func (e *SyncedEnforcer) GetAllSubjects() ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllSubjects() +} + +// GetAllNamedSubjects gets the list of subjects that show up in the current named policy. 
+func (e *SyncedEnforcer) GetAllNamedSubjects(ptype string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllNamedSubjects(ptype) +} + +// GetAllObjects gets the list of objects that show up in the current policy. +func (e *SyncedEnforcer) GetAllObjects() ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllObjects() +} + +// GetAllNamedObjects gets the list of objects that show up in the current named policy. +func (e *SyncedEnforcer) GetAllNamedObjects(ptype string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllNamedObjects(ptype) +} + +// GetAllActions gets the list of actions that show up in the current policy. +func (e *SyncedEnforcer) GetAllActions() ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllActions() +} + +// GetAllNamedActions gets the list of actions that show up in the current named policy. +func (e *SyncedEnforcer) GetAllNamedActions(ptype string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllNamedActions(ptype) +} + +// GetAllRoles gets the list of roles that show up in the current policy. +func (e *SyncedEnforcer) GetAllRoles() ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllRoles() +} + +// GetAllNamedRoles gets the list of roles that show up in the current named policy. +func (e *SyncedEnforcer) GetAllNamedRoles(ptype string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetAllNamedRoles(ptype) +} + +// GetPolicy gets all the authorization rules in the policy. +func (e *SyncedEnforcer) GetPolicy() ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetPolicy() +} + +// GetFilteredPolicy gets all the authorization rules in the policy, field filters can be specified. 
+func (e *SyncedEnforcer) GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetFilteredPolicy(fieldIndex, fieldValues...) +} + +// GetNamedPolicy gets all the authorization rules in the named policy. +func (e *SyncedEnforcer) GetNamedPolicy(ptype string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetNamedPolicy(ptype) +} + +// GetFilteredNamedPolicy gets all the authorization rules in the named policy, field filters can be specified. +func (e *SyncedEnforcer) GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetFilteredNamedPolicy(ptype, fieldIndex, fieldValues...) +} + +// GetGroupingPolicy gets all the role inheritance rules in the policy. +func (e *SyncedEnforcer) GetGroupingPolicy() ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetGroupingPolicy() +} + +// GetFilteredGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. +func (e *SyncedEnforcer) GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetFilteredGroupingPolicy(fieldIndex, fieldValues...) +} + +// GetNamedGroupingPolicy gets all the role inheritance rules in the policy. +func (e *SyncedEnforcer) GetNamedGroupingPolicy(ptype string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetNamedGroupingPolicy(ptype) +} + +// GetFilteredNamedGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. +func (e *SyncedEnforcer) GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetFilteredNamedGroupingPolicy(ptype, fieldIndex, fieldValues...) 
+} + +// HasPolicy determines whether an authorization rule exists. +func (e *SyncedEnforcer) HasPolicy(params ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasPolicy(params...) +} + +// HasNamedPolicy determines whether a named authorization rule exists. +func (e *SyncedEnforcer) HasNamedPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasNamedPolicy(ptype, params...) +} + +// AddPolicy adds an authorization rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *SyncedEnforcer) AddPolicy(params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPolicy(params...) +} + +// AddPolicies adds authorization rules to the current policy. +// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. +// Otherwise the function returns true for the corresponding rule by adding the new rule. +func (e *SyncedEnforcer) AddPolicies(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPolicies(rules) +} + +// AddPoliciesEx adds authorization rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddPolicies, other non-existent rules are added instead of returning false directly. +func (e *SyncedEnforcer) AddPoliciesEx(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPoliciesEx(rules) +} + +// AddNamedPolicy adds an authorization rule to the current named policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. 
+func (e *SyncedEnforcer) AddNamedPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedPolicy(ptype, params...) +} + +// AddNamedPolicies adds authorization rules to the current named policy. +// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. +// Otherwise the function returns true for the corresponding by adding the new rule. +func (e *SyncedEnforcer) AddNamedPolicies(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedPolicies(ptype, rules) +} + +// AddNamedPoliciesEx adds authorization rules to the current named policy. +// If the rule already exists, the rule will not be added. +// But unlike AddNamedPolicies, other non-existent rules are added instead of returning false directly. +func (e *SyncedEnforcer) AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedPoliciesEx(ptype, rules) +} + +// RemovePolicy removes an authorization rule from the current policy. +func (e *SyncedEnforcer) RemovePolicy(params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemovePolicy(params...) +} + +// UpdatePolicy updates an authorization rule from the current policy. +func (e *SyncedEnforcer) UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdatePolicy(oldPolicy, newPolicy) +} + +func (e *SyncedEnforcer) UpdateNamedPolicy(ptype string, p1 []string, p2 []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateNamedPolicy(ptype, p1, p2) +} + +// UpdatePolicies updates authorization rules from the current policies. 
+func (e *SyncedEnforcer) UpdatePolicies(oldPolices [][]string, newPolicies [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdatePolicies(oldPolices, newPolicies) +} + +func (e *SyncedEnforcer) UpdateNamedPolicies(ptype string, p1 [][]string, p2 [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateNamedPolicies(ptype, p1, p2) +} + +func (e *SyncedEnforcer) UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateFilteredPolicies(newPolicies, fieldIndex, fieldValues...) +} + +func (e *SyncedEnforcer) UpdateFilteredNamedPolicies(ptype string, newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateFilteredNamedPolicies(ptype, newPolicies, fieldIndex, fieldValues...) +} + +// RemovePolicies removes authorization rules from the current policy. +func (e *SyncedEnforcer) RemovePolicies(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemovePolicies(rules) +} + +// RemoveFilteredPolicy removes an authorization rule from the current policy, field filters can be specified. +func (e *SyncedEnforcer) RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveFilteredPolicy(fieldIndex, fieldValues...) +} + +// RemoveNamedPolicy removes an authorization rule from the current named policy. +func (e *SyncedEnforcer) RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveNamedPolicy(ptype, params...) +} + +// RemoveNamedPolicies removes authorization rules from the current named policy. 
+func (e *SyncedEnforcer) RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveNamedPolicies(ptype, rules) +} + +// RemoveFilteredNamedPolicy removes an authorization rule from the current named policy, field filters can be specified. +func (e *SyncedEnforcer) RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveFilteredNamedPolicy(ptype, fieldIndex, fieldValues...) +} + +// HasGroupingPolicy determines whether a role inheritance rule exists. +func (e *SyncedEnforcer) HasGroupingPolicy(params ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasGroupingPolicy(params...) +} + +// HasNamedGroupingPolicy determines whether a named role inheritance rule exists. +func (e *SyncedEnforcer) HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasNamedGroupingPolicy(ptype, params...) +} + +// AddGroupingPolicy adds a role inheritance rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *SyncedEnforcer) AddGroupingPolicy(params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddGroupingPolicy(params...) +} + +// AddGroupingPolicies adds role inheritance rulea to the current policy. +// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. +// Otherwise the function returns true for the corresponding policy rule by adding the new rule. 
+func (e *SyncedEnforcer) AddGroupingPolicies(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddGroupingPolicies(rules) +} + +// AddGroupingPoliciesEx adds role inheritance rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddGroupingPolicies, other non-existent rules are added instead of returning false directly. +func (e *SyncedEnforcer) AddGroupingPoliciesEx(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddGroupingPoliciesEx(rules) +} + +// AddNamedGroupingPolicy adds a named role inheritance rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *SyncedEnforcer) AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedGroupingPolicy(ptype, params...) +} + +// AddNamedGroupingPolicies adds named role inheritance rules to the current policy. +// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. +// Otherwise the function returns true for the corresponding policy rule by adding the new rule. +func (e *SyncedEnforcer) AddNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedGroupingPolicies(ptype, rules) +} + +// AddNamedGroupingPoliciesEx adds named role inheritance rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddNamedGroupingPolicies, other non-existent rules are added instead of returning false directly. 
+func (e *SyncedEnforcer) AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddNamedGroupingPoliciesEx(ptype, rules) +} + +// RemoveGroupingPolicy removes a role inheritance rule from the current policy. +func (e *SyncedEnforcer) RemoveGroupingPolicy(params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveGroupingPolicy(params...) +} + +// RemoveGroupingPolicies removes role inheritance rules from the current policy. +func (e *SyncedEnforcer) RemoveGroupingPolicies(rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveGroupingPolicies(rules) +} + +// RemoveFilteredGroupingPolicy removes a role inheritance rule from the current policy, field filters can be specified. +func (e *SyncedEnforcer) RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveFilteredGroupingPolicy(fieldIndex, fieldValues...) +} + +// RemoveNamedGroupingPolicy removes a role inheritance rule from the current named policy. +func (e *SyncedEnforcer) RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveNamedGroupingPolicy(ptype, params...) +} + +// RemoveNamedGroupingPolicies removes role inheritance rules from the current named policy. 
+func (e *SyncedEnforcer) RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveNamedGroupingPolicies(ptype, rules) +} + +func (e *SyncedEnforcer) UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateGroupingPolicy(oldRule, newRule) +} + +func (e *SyncedEnforcer) UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateGroupingPolicies(oldRules, newRules) +} + +func (e *SyncedEnforcer) UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateNamedGroupingPolicy(ptype, oldRule, newRule) +} + +func (e *SyncedEnforcer) UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.UpdateNamedGroupingPolicies(ptype, oldRules, newRules) +} + +// RemoveFilteredNamedGroupingPolicy removes a role inheritance rule from the current named policy, field filters can be specified. +func (e *SyncedEnforcer) RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.RemoveFilteredNamedGroupingPolicy(ptype, fieldIndex, fieldValues...) +} + +// AddFunction adds a customized function. 
+func (e *SyncedEnforcer) AddFunction(name string, function govaluate.ExpressionFunction) { + e.m.Lock() + defer e.m.Unlock() + e.Enforcer.AddFunction(name, function) +} + +func (e *SyncedEnforcer) SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfAddPolicy(sec, ptype, rule) +} + +func (e *SyncedEnforcer) SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfAddPolicies(sec, ptype, rules) +} + +func (e *SyncedEnforcer) SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfAddPoliciesEx(sec, ptype, rules) +} + +func (e *SyncedEnforcer) SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfRemovePolicy(sec, ptype, rule) +} + +func (e *SyncedEnforcer) SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfRemovePolicies(sec, ptype, rules) +} + +func (e *SyncedEnforcer) SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfRemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) 
+} + +func (e *SyncedEnforcer) SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfUpdatePolicy(sec, ptype, oldRule, newRule) +} + +func (e *SyncedEnforcer) SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.SelfUpdatePolicies(sec, ptype, oldRules, newRules) +} diff --git a/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go b/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go new file mode 100644 index 000000000..2f358b372 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/errors/rbac_errors.go @@ -0,0 +1,30 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import "errors" + +// Global errors for rbac defined here. +var ( + ErrNameNotFound = errors.New("error: name does not exist") + ErrDomainParameter = errors.New("error: domain should be 1 parameter") + ErrLinkNotFound = errors.New("error: link between name1 and name2 does not exist") + ErrUseDomainParameter = errors.New("error: useDomain should be 1 parameter") + ErrInvalidFieldValuesParameter = errors.New("fieldValues requires at least one parameter") + + // GetAllowedObjectConditions errors. 
+ ErrObjCondition = errors.New("need to meet the prefix required by the object condition") + ErrEmptyCondition = errors.New("GetAllowedObjectConditions have an empty condition") +) diff --git a/vendor/github.com/casbin/casbin/v2/frontend.go b/vendor/github.com/casbin/casbin/v2/frontend.go new file mode 100644 index 000000000..101a23a5d --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/frontend.go @@ -0,0 +1,57 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package casbin + +import ( + "bytes" + "encoding/json" +) + +func CasbinJsGetPermissionForUser(e IEnforcer, user string) (string, error) { + model := e.GetModel() + m := map[string]interface{}{} + + m["m"] = model.ToText() + + pRules := [][]string{} + for ptype := range model["p"] { + policies, err := model.GetPolicy("p", ptype) + if err != nil { + return "", err + } + for _, rules := range policies { + pRules = append(pRules, append([]string{ptype}, rules...)) + } + } + m["p"] = pRules + + gRules := [][]string{} + for ptype := range model["g"] { + policies, err := model.GetPolicy("g", ptype) + if err != nil { + return "", err + } + for _, rules := range policies { + gRules = append(gRules, append([]string{ptype}, rules...)) + } + } + m["g"] = gRules + + result := bytes.NewBuffer([]byte{}) + encoder := json.NewEncoder(result) + encoder.SetEscapeHTML(false) + err := encoder.Encode(m) + return result.String(), err +} diff --git a/vendor/github.com/casbin/casbin/v2/frontend_old.go b/vendor/github.com/casbin/casbin/v2/frontend_old.go new file mode 100644 index 000000000..139b164fb --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/frontend_old.go @@ -0,0 +1,30 @@ +// Copyright 2021 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package casbin + +import "encoding/json" + +func CasbinJsGetPermissionForUserOld(e IEnforcer, user string) ([]byte, error) { + policy, err := e.GetImplicitPermissionsForUser(user) + if err != nil { + return nil, err + } + permission := make(map[string][]string) + for i := 0; i < len(policy); i++ { + permission[policy[i][2]] = append(permission[policy[i][2]], policy[i][1]) + } + b, _ := json.Marshal(permission) + return b, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/internal_api.go b/vendor/github.com/casbin/casbin/v2/internal_api.go new file mode 100644 index 000000000..cd329016c --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/internal_api.go @@ -0,0 +1,497 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "fmt" + + Err "github.com/casbin/casbin/v2/errors" + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" +) + +const ( + notImplemented = "not implemented" +) + +func (e *Enforcer) shouldPersist() bool { + return e.adapter != nil && e.autoSave +} + +func (e *Enforcer) shouldNotify() bool { + return e.watcher != nil && e.autoNotifyWatcher +} + +// addPolicy adds a rule to the current policy. 
+func (e *Enforcer) addPolicyWithoutNotify(sec string, ptype string, rule []string) (bool, error) { + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.AddPolicies(sec, ptype, [][]string{rule}) + } + + hasPolicy, err := e.model.HasPolicy(sec, ptype, rule) + if hasPolicy || err != nil { + return hasPolicy, err + } + + if e.shouldPersist() { + if err = e.adapter.AddPolicy(sec, ptype, rule); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + err = e.model.AddPolicy(sec, ptype, rule) + if err != nil { + return false, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{rule}) + if err != nil { + return true, err + } + } + + return true, nil +} + +// addPoliciesWithoutNotify adds rules to the current policy without notify +// If autoRemoveRepeat == true, existing rules are automatically filtered +// Otherwise, false is returned directly. +func (e *Enforcer) addPoliciesWithoutNotify(sec string, ptype string, rules [][]string, autoRemoveRepeat bool) (bool, error) { + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.AddPolicies(sec, ptype, rules) + } + + if !autoRemoveRepeat { + hasPolicies, err := e.model.HasPolicies(sec, ptype, rules) + if hasPolicies || err != nil { + return false, err + } + } + + if e.shouldPersist() { + if err := e.adapter.(persist.BatchAdapter).AddPolicies(sec, ptype, rules); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + err := e.model.AddPolicies(sec, ptype, rules) + if err != nil { + return false, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, rules) + if err != nil { + return true, err + } + + err = e.BuildIncrementalConditionalRoleLinks(model.PolicyAdd, ptype, rules) + if err != nil { + return true, err + } + } + + return true, nil +} + +// removePolicy removes a rule from the current policy. 
+func (e *Enforcer) removePolicyWithoutNotify(sec string, ptype string, rule []string) (bool, error) { + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.RemovePolicies(sec, ptype, [][]string{rule}) + } + + if e.shouldPersist() { + if err := e.adapter.RemovePolicy(sec, ptype, rule); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + ruleRemoved, err := e.model.RemovePolicy(sec, ptype, rule) + if !ruleRemoved || err != nil { + return ruleRemoved, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{rule}) + if err != nil { + return ruleRemoved, err + } + } + + return ruleRemoved, nil +} + +func (e *Enforcer) updatePolicyWithoutNotify(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.UpdatePolicy(sec, ptype, oldRule, newRule) + } + + if e.shouldPersist() { + if err := e.adapter.(persist.UpdatableAdapter).UpdatePolicy(sec, ptype, oldRule, newRule); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + ruleUpdated, err := e.model.UpdatePolicy(sec, ptype, oldRule, newRule) + if !ruleUpdated || err != nil { + return ruleUpdated, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, [][]string{oldRule}) // remove the old rule + if err != nil { + return ruleUpdated, err + } + err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, [][]string{newRule}) // add the new rule + if err != nil { + return ruleUpdated, err + } + } + + return ruleUpdated, nil +} + +func (e *Enforcer) updatePoliciesWithoutNotify(sec string, ptype string, oldRules [][]string, newRules [][]string) (bool, error) { + if len(newRules) != len(oldRules) { + return false, fmt.Errorf("the length of oldRules should be equal to the length of newRules, but got the length of oldRules is %d, the length of newRules is %d", 
len(oldRules), len(newRules)) + } + + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.UpdatePolicies(sec, ptype, oldRules, newRules) + } + + if e.shouldPersist() { + if err := e.adapter.(persist.UpdatableAdapter).UpdatePolicies(sec, ptype, oldRules, newRules); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + ruleUpdated, err := e.model.UpdatePolicies(sec, ptype, oldRules, newRules) + if !ruleUpdated || err != nil { + return ruleUpdated, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rules + if err != nil { + return ruleUpdated, err + } + err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rules + if err != nil { + return ruleUpdated, err + } + } + + return ruleUpdated, nil +} + +// removePolicies removes rules from the current policy. +func (e *Enforcer) removePoliciesWithoutNotify(sec string, ptype string, rules [][]string) (bool, error) { + if hasPolicies, err := e.model.HasPolicies(sec, ptype, rules); !hasPolicies || err != nil { + return hasPolicies, err + } + + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.RemovePolicies(sec, ptype, rules) + } + + if e.shouldPersist() { + if err := e.adapter.(persist.BatchAdapter).RemovePolicies(sec, ptype, rules); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + rulesRemoved, err := e.model.RemovePolicies(sec, ptype, rules) + if !rulesRemoved || err != nil { + return rulesRemoved, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, rules) + if err != nil { + return rulesRemoved, err + } + } + return rulesRemoved, nil +} + +// removeFilteredPolicy removes rules based on field filters from the current policy. 
+func (e *Enforcer) removeFilteredPolicyWithoutNotify(sec string, ptype string, fieldIndex int, fieldValues []string) (bool, error) { + if len(fieldValues) == 0 { + return false, Err.ErrInvalidFieldValuesParameter + } + + if e.dispatcher != nil && e.autoNotifyDispatcher { + return true, e.dispatcher.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) + } + + if e.shouldPersist() { + if err := e.adapter.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...); err != nil { + if err.Error() != notImplemented { + return false, err + } + } + } + + ruleRemoved, effects, err := e.model.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) + if !ruleRemoved || err != nil { + return ruleRemoved, err + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, effects) + if err != nil { + return ruleRemoved, err + } + } + + return ruleRemoved, nil +} + +func (e *Enforcer) updateFilteredPoliciesWithoutNotify(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) { + var ( + oldRules [][]string + err error + ) + + if _, err = e.model.GetAssertion(sec, ptype); err != nil { + return oldRules, err + } + + if e.shouldPersist() { + if oldRules, err = e.adapter.(persist.UpdatableAdapter).UpdateFilteredPolicies(sec, ptype, newRules, fieldIndex, fieldValues...); err != nil { + if err.Error() != notImplemented { + return nil, err + } + } + // For compatibility, because some adapters return oldRules containing ptype, see https://github.com/casbin/xorm-adapter/issues/49 + for i, oldRule := range oldRules { + if len(oldRules[i]) == len(e.model[sec][ptype].Tokens)+1 { + oldRules[i] = oldRule[1:] + } + } + } + + if e.dispatcher != nil && e.autoNotifyDispatcher { + return oldRules, e.dispatcher.UpdateFilteredPolicies(sec, ptype, oldRules, newRules) + } + + ruleChanged, err := e.model.RemovePolicies(sec, ptype, oldRules) + if err != nil { + return oldRules, err + } + err = 
e.model.AddPolicies(sec, ptype, newRules) + if err != nil { + return oldRules, err + } + ruleChanged = ruleChanged && len(newRules) != 0 + if !ruleChanged { + return make([][]string, 0), nil + } + + if sec == "g" { + err := e.BuildIncrementalRoleLinks(model.PolicyRemove, ptype, oldRules) // remove the old rules + if err != nil { + return oldRules, err + } + err = e.BuildIncrementalRoleLinks(model.PolicyAdd, ptype, newRules) // add the new rules + if err != nil { + return oldRules, err + } + } + + return oldRules, nil +} + +// addPolicy adds a rule to the current policy. +func (e *Enforcer) addPolicy(sec string, ptype string, rule []string) (bool, error) { + ok, err := e.addPolicyWithoutNotify(sec, ptype, rule) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForAddPolicy(sec, ptype, rule...) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +// addPolicies adds rules to the current policy. +// If autoRemoveRepeat == true, existing rules are automatically filtered +// Otherwise, false is returned directly. +func (e *Enforcer) addPolicies(sec string, ptype string, rules [][]string, autoRemoveRepeat bool) (bool, error) { + ok, err := e.addPoliciesWithoutNotify(sec, ptype, rules, autoRemoveRepeat) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForAddPolicies(sec, ptype, rules...) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +// removePolicy removes a rule from the current policy. 
+func (e *Enforcer) removePolicy(sec string, ptype string, rule []string) (bool, error) { + ok, err := e.removePolicyWithoutNotify(sec, ptype, rule) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForRemovePolicy(sec, ptype, rule...) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +func (e *Enforcer) updatePolicy(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { + ok, err := e.updatePolicyWithoutNotify(sec, ptype, oldRule, newRule) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { + err = watcher.UpdateForUpdatePolicy(sec, ptype, oldRule, newRule) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +func (e *Enforcer) updatePolicies(sec string, ptype string, oldRules [][]string, newRules [][]string) (bool, error) { + ok, err := e.updatePoliciesWithoutNotify(sec, ptype, oldRules, newRules) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { + err = watcher.UpdateForUpdatePolicies(sec, ptype, oldRules, newRules) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +// removePolicies removes rules from the current policy. +func (e *Enforcer) removePolicies(sec string, ptype string, rules [][]string) (bool, error) { + ok, err := e.removePoliciesWithoutNotify(sec, ptype, rules) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForRemovePolicies(sec, ptype, rules...) 
+ } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +// removeFilteredPolicy removes rules based on field filters from the current policy. +func (e *Enforcer) removeFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues []string) (bool, error) { + ok, err := e.removeFilteredPolicyWithoutNotify(sec, ptype, fieldIndex, fieldValues) + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.WatcherEx); ok { + err = watcher.UpdateForRemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +func (e *Enforcer) updateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + oldRules, err := e.updateFilteredPoliciesWithoutNotify(sec, ptype, newRules, fieldIndex, fieldValues...) + ok := len(oldRules) != 0 + if !ok || err != nil { + return ok, err + } + + if e.shouldNotify() { + var err error + if watcher, ok := e.watcher.(persist.UpdatableWatcher); ok { + err = watcher.UpdateForUpdatePolicies(sec, ptype, oldRules, newRules) + } else { + err = e.watcher.Update() + } + return true, err + } + + return true, nil +} + +func (e *Enforcer) GetFieldIndex(ptype string, field string) (int, error) { + return e.model.GetFieldIndex(ptype, field) +} + +func (e *Enforcer) SetFieldIndex(ptype string, field string, index int) { + assertion := e.model["p"][ptype] + assertion.FieldIndexMap[field] = index +} diff --git a/vendor/github.com/casbin/casbin/v2/log/default_logger.go b/vendor/github.com/casbin/casbin/v2/log/default_logger.go new file mode 100644 index 000000000..9994f390b --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/log/default_logger.go @@ -0,0 +1,104 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "fmt" + "log" + "strings" +) + +// DefaultLogger is the implementation for a Logger using golang log. +type DefaultLogger struct { + enabled bool +} + +func (l *DefaultLogger) EnableLog(enable bool) { + l.enabled = enable +} + +func (l *DefaultLogger) IsEnabled() bool { + return l.enabled +} + +func (l *DefaultLogger) LogModel(model [][]string) { + if !l.enabled { + return + } + var str strings.Builder + str.WriteString("Model: ") + for _, v := range model { + str.WriteString(fmt.Sprintf("%v\n", v)) + } + + log.Println(str.String()) +} + +func (l *DefaultLogger) LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) { + if !l.enabled { + return + } + + var reqStr strings.Builder + reqStr.WriteString("Request: ") + for i, rval := range request { + if i != len(request)-1 { + reqStr.WriteString(fmt.Sprintf("%v, ", rval)) + } else { + reqStr.WriteString(fmt.Sprintf("%v", rval)) + } + } + reqStr.WriteString(fmt.Sprintf(" ---> %t\n", result)) + + reqStr.WriteString("Hit Policy: ") + for i, pval := range explains { + if i != len(explains)-1 { + reqStr.WriteString(fmt.Sprintf("%v, ", pval)) + } else { + reqStr.WriteString(fmt.Sprintf("%v \n", pval)) + } + } + + log.Println(reqStr.String()) +} + +func (l *DefaultLogger) LogPolicy(policy map[string][][]string) { + if !l.enabled { + return + } + + var str strings.Builder + str.WriteString("Policy: ") + for k, v := 
range policy { + str.WriteString(fmt.Sprintf("%s : %v\n", k, v)) + } + + log.Println(str.String()) +} + +func (l *DefaultLogger) LogRole(roles []string) { + if !l.enabled { + return + } + + log.Println("Roles: ", strings.Join(roles, "\n")) +} + +func (l *DefaultLogger) LogError(err error, msg ...string) { + if !l.enabled { + return + } + log.Println(msg, err) +} diff --git a/vendor/github.com/casbin/casbin/v2/log/log_util.go b/vendor/github.com/casbin/casbin/v2/log/log_util.go new file mode 100644 index 000000000..7edabf899 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/log/log_util.go @@ -0,0 +1,52 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +var logger Logger = &DefaultLogger{} + +// SetLogger sets the current logger. +func SetLogger(l Logger) { + logger = l +} + +// GetLogger returns the current logger. +func GetLogger() Logger { + return logger +} + +// LogModel logs the model information. +func LogModel(model [][]string) { + logger.LogModel(model) +} + +// LogEnforce logs the enforcer information. +func LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) { + logger.LogEnforce(matcher, request, result, explains) +} + +// LogRole log info related to role. +func LogRole(roles []string) { + logger.LogRole(roles) +} + +// LogPolicy logs the policy information. 
+func LogPolicy(policy map[string][][]string) { + logger.LogPolicy(policy) +} + +// LogError logs the error information. +func LogError(err error, msg ...string) { + logger.LogError(err, msg...) +} diff --git a/vendor/github.com/casbin/casbin/v2/log/logger.go b/vendor/github.com/casbin/casbin/v2/log/logger.go new file mode 100644 index 000000000..8982cae6f --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/log/logger.go @@ -0,0 +1,41 @@ +// Copyright 2018 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +//go:generate mockgen -destination=./mocks/mock_logger.go -package=mocks github.com/casbin/casbin/v2/log Logger + +// Logger is the logging interface implementation. +type Logger interface { + // EnableLog controls whether print the message. + EnableLog(bool) + + // IsEnabled returns if logger is enabled. + IsEnabled() bool + + // LogModel log info related to model. + LogModel(model [][]string) + + // LogEnforce log info related to enforce. + LogEnforce(matcher string, request []interface{}, result bool, explains [][]string) + + // LogRole log info related to role. + LogRole(roles []string) + + // LogPolicy log info related to policy. 
+ LogPolicy(policy map[string][][]string) + + // LogError log info relate to error + LogError(err error, msg ...string) +} diff --git a/vendor/github.com/casbin/casbin/v2/management_api.go b/vendor/github.com/casbin/casbin/v2/management_api.go new file mode 100644 index 000000000..6641f8342 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/management_api.go @@ -0,0 +1,500 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "errors" + "fmt" + "strings" + + "github.com/casbin/casbin/v2/constant" + "github.com/casbin/casbin/v2/util" + "github.com/casbin/govaluate" +) + +// GetAllSubjects gets the list of subjects that show up in the current policy. +func (e *Enforcer) GetAllSubjects() ([]string, error) { + return e.model.GetValuesForFieldInPolicyAllTypesByName("p", constant.SubjectIndex) +} + +// GetAllNamedSubjects gets the list of subjects that show up in the current named policy. +func (e *Enforcer) GetAllNamedSubjects(ptype string) ([]string, error) { + fieldIndex, err := e.model.GetFieldIndex(ptype, constant.SubjectIndex) + if err != nil { + return nil, err + } + return e.model.GetValuesForFieldInPolicy("p", ptype, fieldIndex) +} + +// GetAllObjects gets the list of objects that show up in the current policy. 
+func (e *Enforcer) GetAllObjects() ([]string, error) { + return e.model.GetValuesForFieldInPolicyAllTypesByName("p", constant.ObjectIndex) +} + +// GetAllNamedObjects gets the list of objects that show up in the current named policy. +func (e *Enforcer) GetAllNamedObjects(ptype string) ([]string, error) { + fieldIndex, err := e.model.GetFieldIndex(ptype, constant.ObjectIndex) + if err != nil { + return nil, err + } + return e.model.GetValuesForFieldInPolicy("p", ptype, fieldIndex) +} + +// GetAllActions gets the list of actions that show up in the current policy. +func (e *Enforcer) GetAllActions() ([]string, error) { + return e.model.GetValuesForFieldInPolicyAllTypesByName("p", constant.ActionIndex) +} + +// GetAllNamedActions gets the list of actions that show up in the current named policy. +func (e *Enforcer) GetAllNamedActions(ptype string) ([]string, error) { + fieldIndex, err := e.model.GetFieldIndex(ptype, constant.ActionIndex) + if err != nil { + return nil, err + } + return e.model.GetValuesForFieldInPolicy("p", ptype, fieldIndex) +} + +// GetAllRoles gets the list of roles that show up in the current policy. +func (e *Enforcer) GetAllRoles() ([]string, error) { + return e.model.GetValuesForFieldInPolicyAllTypes("g", 1) +} + +// GetAllNamedRoles gets the list of roles that show up in the current named policy. +func (e *Enforcer) GetAllNamedRoles(ptype string) ([]string, error) { + return e.model.GetValuesForFieldInPolicy("g", ptype, 1) +} + +// GetPolicy gets all the authorization rules in the policy. +func (e *Enforcer) GetPolicy() ([][]string, error) { + return e.GetNamedPolicy("p") +} + +// GetFilteredPolicy gets all the authorization rules in the policy, field filters can be specified. +func (e *Enforcer) GetFilteredPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { + return e.GetFilteredNamedPolicy("p", fieldIndex, fieldValues...) +} + +// GetNamedPolicy gets all the authorization rules in the named policy. 
+func (e *Enforcer) GetNamedPolicy(ptype string) ([][]string, error) { + return e.model.GetPolicy("p", ptype) +} + +// GetFilteredNamedPolicy gets all the authorization rules in the named policy, field filters can be specified. +func (e *Enforcer) GetFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + return e.model.GetFilteredPolicy("p", ptype, fieldIndex, fieldValues...) +} + +// GetGroupingPolicy gets all the role inheritance rules in the policy. +func (e *Enforcer) GetGroupingPolicy() ([][]string, error) { + return e.GetNamedGroupingPolicy("g") +} + +// GetFilteredGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. +func (e *Enforcer) GetFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) ([][]string, error) { + return e.GetFilteredNamedGroupingPolicy("g", fieldIndex, fieldValues...) +} + +// GetNamedGroupingPolicy gets all the role inheritance rules in the policy. +func (e *Enforcer) GetNamedGroupingPolicy(ptype string) ([][]string, error) { + return e.model.GetPolicy("g", ptype) +} + +// GetFilteredNamedGroupingPolicy gets all the role inheritance rules in the policy, field filters can be specified. +func (e *Enforcer) GetFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + return e.model.GetFilteredPolicy("g", ptype, fieldIndex, fieldValues...) +} + +// GetFilteredNamedPolicyWithMatcher gets rules based on matcher from the policy. 
+func (e *Enforcer) GetFilteredNamedPolicyWithMatcher(ptype string, matcher string) ([][]string, error) { + var res [][]string + var err error + + functions := e.fm.GetFunctions() + if _, ok := e.model["g"]; ok { + for key, ast := range e.model["g"] { + // g must be a normal role definition (ast.RM != nil) + // or a conditional role definition (ast.CondRM != nil) + // ast.RM and ast.CondRM shouldn't be nil at the same time + if ast.RM != nil { + functions[key] = util.GenerateGFunction(ast.RM) + } + if ast.CondRM != nil { + functions[key] = util.GenerateConditionalGFunction(ast.CondRM) + } + } + } + + var expString string + if matcher == "" { + return res, fmt.Errorf("matcher is empty") + } else { + expString = util.RemoveComments(util.EscapeAssertion(matcher)) + } + + var expression *govaluate.EvaluableExpression + + expression, err = govaluate.NewEvaluableExpressionWithFunctions(expString, functions) + if err != nil { + return res, err + } + + pTokens := make(map[string]int, len(e.model["p"][ptype].Tokens)) + for i, token := range e.model["p"][ptype].Tokens { + pTokens[token] = i + } + + parameters := enforceParameters{ + pTokens: pTokens, + } + + if policyLen := len(e.model["p"][ptype].Policy); policyLen != 0 && strings.Contains(expString, ptype+"_") { + for _, pvals := range e.model["p"][ptype].Policy { + if len(e.model["p"][ptype].Tokens) != len(pvals) { + return res, fmt.Errorf( + "invalid policy size: expected %d, got %d, pvals: %v", + len(e.model["p"][ptype].Tokens), + len(pvals), + pvals) + } + + parameters.pVals = pvals + + result, err := expression.Eval(parameters) + + if err != nil { + return res, err + } + + switch result := result.(type) { + case bool: + if result { + res = append(res, pvals) + } + case float64: + if result != 0 { + res = append(res, pvals) + } + default: + return res, errors.New("matcher result should be bool, int or float") + } + } + } + return res, nil +} + +// HasPolicy determines whether an authorization rule exists. 
+func (e *Enforcer) HasPolicy(params ...interface{}) (bool, error) { + return e.HasNamedPolicy("p", params...) +} + +// HasNamedPolicy determines whether a named authorization rule exists. +func (e *Enforcer) HasNamedPolicy(ptype string, params ...interface{}) (bool, error) { + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + return e.model.HasPolicy("p", ptype, strSlice) + } + + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + return e.model.HasPolicy("p", ptype, policy) +} + +// AddPolicy adds an authorization rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *Enforcer) AddPolicy(params ...interface{}) (bool, error) { + return e.AddNamedPolicy("p", params...) +} + +// AddPolicies adds authorization rules to the current policy. +// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. +// Otherwise the function returns true for the corresponding rule by adding the new rule. +func (e *Enforcer) AddPolicies(rules [][]string) (bool, error) { + return e.AddNamedPolicies("p", rules) +} + +// AddPoliciesEx adds authorization rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddPolicies, other non-existent rules are added instead of returning false directly. +func (e *Enforcer) AddPoliciesEx(rules [][]string) (bool, error) { + return e.AddNamedPoliciesEx("p", rules) +} + +// AddNamedPolicy adds an authorization rule to the current named policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. 
+func (e *Enforcer) AddNamedPolicy(ptype string, params ...interface{}) (bool, error) { + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + strSlice = append(make([]string, 0, len(strSlice)), strSlice...) + return e.addPolicy("p", ptype, strSlice) + } + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + return e.addPolicy("p", ptype, policy) +} + +// AddNamedPolicies adds authorization rules to the current named policy. +// If the rule already exists, the function returns false for the corresponding rule and the rule will not be added. +// Otherwise the function returns true for the corresponding by adding the new rule. +func (e *Enforcer) AddNamedPolicies(ptype string, rules [][]string) (bool, error) { + return e.addPolicies("p", ptype, rules, false) +} + +// AddNamedPoliciesEx adds authorization rules to the current named policy. +// If the rule already exists, the rule will not be added. +// But unlike AddNamedPolicies, other non-existent rules are added instead of returning false directly. +func (e *Enforcer) AddNamedPoliciesEx(ptype string, rules [][]string) (bool, error) { + return e.addPolicies("p", ptype, rules, true) +} + +// RemovePolicy removes an authorization rule from the current policy. +func (e *Enforcer) RemovePolicy(params ...interface{}) (bool, error) { + return e.RemoveNamedPolicy("p", params...) +} + +// UpdatePolicy updates an authorization rule from the current policy. +func (e *Enforcer) UpdatePolicy(oldPolicy []string, newPolicy []string) (bool, error) { + return e.UpdateNamedPolicy("p", oldPolicy, newPolicy) +} + +func (e *Enforcer) UpdateNamedPolicy(ptype string, p1 []string, p2 []string) (bool, error) { + return e.updatePolicy("p", ptype, p1, p2) +} + +// UpdatePolicies updates authorization rules from the current policies. 
+func (e *Enforcer) UpdatePolicies(oldPolices [][]string, newPolicies [][]string) (bool, error) { + return e.UpdateNamedPolicies("p", oldPolices, newPolicies) +} + +func (e *Enforcer) UpdateNamedPolicies(ptype string, p1 [][]string, p2 [][]string) (bool, error) { + return e.updatePolicies("p", ptype, p1, p2) +} + +func (e *Enforcer) UpdateFilteredPolicies(newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.UpdateFilteredNamedPolicies("p", newPolicies, fieldIndex, fieldValues...) +} + +func (e *Enforcer) UpdateFilteredNamedPolicies(ptype string, newPolicies [][]string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.updateFilteredPolicies("p", ptype, newPolicies, fieldIndex, fieldValues...) +} + +// RemovePolicies removes authorization rules from the current policy. +func (e *Enforcer) RemovePolicies(rules [][]string) (bool, error) { + return e.RemoveNamedPolicies("p", rules) +} + +// RemoveFilteredPolicy removes an authorization rule from the current policy, field filters can be specified. +func (e *Enforcer) RemoveFilteredPolicy(fieldIndex int, fieldValues ...string) (bool, error) { + return e.RemoveFilteredNamedPolicy("p", fieldIndex, fieldValues...) +} + +// RemoveNamedPolicy removes an authorization rule from the current named policy. +func (e *Enforcer) RemoveNamedPolicy(ptype string, params ...interface{}) (bool, error) { + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + return e.removePolicy("p", ptype, strSlice) + } + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + return e.removePolicy("p", ptype, policy) +} + +// RemoveNamedPolicies removes authorization rules from the current named policy. 
+func (e *Enforcer) RemoveNamedPolicies(ptype string, rules [][]string) (bool, error) { + return e.removePolicies("p", ptype, rules) +} + +// RemoveFilteredNamedPolicy removes an authorization rule from the current named policy, field filters can be specified. +func (e *Enforcer) RemoveFilteredNamedPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.removeFilteredPolicy("p", ptype, fieldIndex, fieldValues) +} + +// HasGroupingPolicy determines whether a role inheritance rule exists. +func (e *Enforcer) HasGroupingPolicy(params ...interface{}) (bool, error) { + return e.HasNamedGroupingPolicy("g", params...) +} + +// HasNamedGroupingPolicy determines whether a named role inheritance rule exists. +func (e *Enforcer) HasNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + return e.model.HasPolicy("g", ptype, strSlice) + } + + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + return e.model.HasPolicy("g", ptype, policy) +} + +// AddGroupingPolicy adds a role inheritance rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *Enforcer) AddGroupingPolicy(params ...interface{}) (bool, error) { + return e.AddNamedGroupingPolicy("g", params...) +} + +// AddGroupingPolicies adds role inheritance rules to the current policy. +// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. +// Otherwise the function returns true for the corresponding policy rule by adding the new rule. +func (e *Enforcer) AddGroupingPolicies(rules [][]string) (bool, error) { + return e.AddNamedGroupingPolicies("g", rules) +} + +// AddGroupingPoliciesEx adds role inheritance rules to the current policy. 
+// If the rule already exists, the rule will not be added. +// But unlike AddGroupingPolicies, other non-existent rules are added instead of returning false directly. +func (e *Enforcer) AddGroupingPoliciesEx(rules [][]string) (bool, error) { + return e.AddNamedGroupingPoliciesEx("g", rules) +} + +// AddNamedGroupingPolicy adds a named role inheritance rule to the current policy. +// If the rule already exists, the function returns false and the rule will not be added. +// Otherwise the function returns true by adding the new rule. +func (e *Enforcer) AddNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + var ruleAdded bool + var err error + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + ruleAdded, err = e.addPolicy("g", ptype, strSlice) + } else { + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + ruleAdded, err = e.addPolicy("g", ptype, policy) + } + + return ruleAdded, err +} + +// AddNamedGroupingPolicies adds named role inheritance rules to the current policy. +// If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added. +// Otherwise the function returns true for the corresponding policy rule by adding the new rule. +func (e *Enforcer) AddNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { + return e.addPolicies("g", ptype, rules, false) +} + +// AddNamedGroupingPoliciesEx adds named role inheritance rules to the current policy. +// If the rule already exists, the rule will not be added. +// But unlike AddNamedGroupingPolicies, other non-existent rules are added instead of returning false directly. +func (e *Enforcer) AddNamedGroupingPoliciesEx(ptype string, rules [][]string) (bool, error) { + return e.addPolicies("g", ptype, rules, true) +} + +// RemoveGroupingPolicy removes a role inheritance rule from the current policy. 
+func (e *Enforcer) RemoveGroupingPolicy(params ...interface{}) (bool, error) { + return e.RemoveNamedGroupingPolicy("g", params...) +} + +// RemoveGroupingPolicies removes role inheritance rules from the current policy. +func (e *Enforcer) RemoveGroupingPolicies(rules [][]string) (bool, error) { + return e.RemoveNamedGroupingPolicies("g", rules) +} + +// RemoveFilteredGroupingPolicy removes a role inheritance rule from the current policy, field filters can be specified. +func (e *Enforcer) RemoveFilteredGroupingPolicy(fieldIndex int, fieldValues ...string) (bool, error) { + return e.RemoveFilteredNamedGroupingPolicy("g", fieldIndex, fieldValues...) +} + +// RemoveNamedGroupingPolicy removes a role inheritance rule from the current named policy. +func (e *Enforcer) RemoveNamedGroupingPolicy(ptype string, params ...interface{}) (bool, error) { + var ruleRemoved bool + var err error + if strSlice, ok := params[0].([]string); len(params) == 1 && ok { + ruleRemoved, err = e.removePolicy("g", ptype, strSlice) + } else { + policy := make([]string, 0) + for _, param := range params { + policy = append(policy, param.(string)) + } + + ruleRemoved, err = e.removePolicy("g", ptype, policy) + } + + return ruleRemoved, err +} + +// RemoveNamedGroupingPolicies removes role inheritance rules from the current named policy. +func (e *Enforcer) RemoveNamedGroupingPolicies(ptype string, rules [][]string) (bool, error) { + return e.removePolicies("g", ptype, rules) +} + +func (e *Enforcer) UpdateGroupingPolicy(oldRule []string, newRule []string) (bool, error) { + return e.UpdateNamedGroupingPolicy("g", oldRule, newRule) +} + +// UpdateGroupingPolicies updates authorization rules from the current policies. 
+func (e *Enforcer) UpdateGroupingPolicies(oldRules [][]string, newRules [][]string) (bool, error) { + return e.UpdateNamedGroupingPolicies("g", oldRules, newRules) +} + +func (e *Enforcer) UpdateNamedGroupingPolicy(ptype string, oldRule []string, newRule []string) (bool, error) { + return e.updatePolicy("g", ptype, oldRule, newRule) +} + +func (e *Enforcer) UpdateNamedGroupingPolicies(ptype string, oldRules [][]string, newRules [][]string) (bool, error) { + return e.updatePolicies("g", ptype, oldRules, newRules) +} + +// RemoveFilteredNamedGroupingPolicy removes a role inheritance rule from the current named policy, field filters can be specified. +func (e *Enforcer) RemoveFilteredNamedGroupingPolicy(ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.removeFilteredPolicy("g", ptype, fieldIndex, fieldValues) +} + +// AddFunction adds a customized function. +func (e *Enforcer) AddFunction(name string, function govaluate.ExpressionFunction) { + e.fm.AddFunction(name, function) +} + +func (e *Enforcer) SelfAddPolicy(sec string, ptype string, rule []string) (bool, error) { + return e.addPolicyWithoutNotify(sec, ptype, rule) +} + +func (e *Enforcer) SelfAddPolicies(sec string, ptype string, rules [][]string) (bool, error) { + return e.addPoliciesWithoutNotify(sec, ptype, rules, false) +} + +func (e *Enforcer) SelfAddPoliciesEx(sec string, ptype string, rules [][]string) (bool, error) { + return e.addPoliciesWithoutNotify(sec, ptype, rules, true) +} + +func (e *Enforcer) SelfRemovePolicy(sec string, ptype string, rule []string) (bool, error) { + return e.removePolicyWithoutNotify(sec, ptype, rule) +} + +func (e *Enforcer) SelfRemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { + return e.removePoliciesWithoutNotify(sec, ptype, rules) +} + +func (e *Enforcer) SelfRemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, error) { + return e.removeFilteredPolicyWithoutNotify(sec, ptype, 
fieldIndex, fieldValues) +} + +func (e *Enforcer) SelfUpdatePolicy(sec string, ptype string, oldRule, newRule []string) (bool, error) { + return e.updatePolicyWithoutNotify(sec, ptype, oldRule, newRule) +} + +func (e *Enforcer) SelfUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { + return e.updatePoliciesWithoutNotify(sec, ptype, oldRules, newRules) +} diff --git a/vendor/github.com/casbin/casbin/v2/model/assertion.go b/vendor/github.com/casbin/casbin/v2/model/assertion.go new file mode 100644 index 000000000..7c5381d72 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/model/assertion.go @@ -0,0 +1,194 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "errors" + "strings" + + "github.com/casbin/casbin/v2/log" + "github.com/casbin/casbin/v2/rbac" +) + +// Assertion represents an expression in a section of the model. +// For example: r = sub, obj, act. 
+type Assertion struct { + Key string + Value string + Tokens []string + ParamsTokens []string + Policy [][]string + PolicyMap map[string]int + RM rbac.RoleManager + CondRM rbac.ConditionalRoleManager + FieldIndexMap map[string]int + + logger log.Logger +} + +func (ast *Assertion) buildIncrementalRoleLinks(rm rbac.RoleManager, op PolicyOp, rules [][]string) error { + ast.RM = rm + count := strings.Count(ast.Value, "_") + if count < 2 { + return errors.New("the number of \"_\" in role definition should be at least 2") + } + + for _, rule := range rules { + if len(rule) < count { + return errors.New("grouping policy elements do not meet role definition") + } + if len(rule) > count { + rule = rule[:count] + } + switch op { + case PolicyAdd: + err := rm.AddLink(rule[0], rule[1], rule[2:]...) + if err != nil { + return err + } + case PolicyRemove: + err := rm.DeleteLink(rule[0], rule[1], rule[2:]...) + if err != nil { + return err + } + } + } + return nil +} + +func (ast *Assertion) buildRoleLinks(rm rbac.RoleManager) error { + ast.RM = rm + count := strings.Count(ast.Value, "_") + if count < 2 { + return errors.New("the number of \"_\" in role definition should be at least 2") + } + for _, rule := range ast.Policy { + if len(rule) < count { + return errors.New("grouping policy elements do not meet role definition") + } + if len(rule) > count { + rule = rule[:count] + } + err := ast.RM.AddLink(rule[0], rule[1], rule[2:]...) 
+ if err != nil { + return err + } + } + + return nil +} + +func (ast *Assertion) buildIncrementalConditionalRoleLinks(condRM rbac.ConditionalRoleManager, op PolicyOp, rules [][]string) error { + ast.CondRM = condRM + count := strings.Count(ast.Value, "_") + if count < 2 { + return errors.New("the number of \"_\" in role definition should be at least 2") + } + + for _, rule := range rules { + if len(rule) < count { + return errors.New("grouping policy elements do not meet role definition") + } + if len(rule) > count { + rule = rule[:count] + } + + var err error + domainRule := rule[2:len(ast.Tokens)] + + switch op { + case PolicyAdd: + err = ast.addConditionalRoleLink(rule, domainRule) + case PolicyRemove: + err = ast.CondRM.DeleteLink(rule[0], rule[1], rule[2:]...) + } + if err != nil { + return err + } + } + + return nil +} + +func (ast *Assertion) buildConditionalRoleLinks(condRM rbac.ConditionalRoleManager) error { + ast.CondRM = condRM + count := strings.Count(ast.Value, "_") + if count < 2 { + return errors.New("the number of \"_\" in role definition should be at least 2") + } + for _, rule := range ast.Policy { + if len(rule) < count { + return errors.New("grouping policy elements do not meet role definition") + } + if len(rule) > count { + rule = rule[:count] + } + + domainRule := rule[2:len(ast.Tokens)] + + err := ast.addConditionalRoleLink(rule, domainRule) + if err != nil { + return err + } + } + + return nil +} + +// addConditionalRoleLink adds Link to rbac.ConditionalRoleManager and sets the parameters for LinkConditionFunc. +func (ast *Assertion) addConditionalRoleLink(rule []string, domainRule []string) error { + var err error + if len(domainRule) == 0 { + err = ast.CondRM.AddLink(rule[0], rule[1]) + if err == nil { + ast.CondRM.SetLinkConditionFuncParams(rule[0], rule[1], rule[len(ast.Tokens):]...) 
+ } + } else { + domain := domainRule[0] + err = ast.CondRM.AddLink(rule[0], rule[1], domain) + if err == nil { + ast.CondRM.SetDomainLinkConditionFuncParams(rule[0], rule[1], domain, rule[len(ast.Tokens):]...) + } + } + return err +} + +func (ast *Assertion) setLogger(logger log.Logger) { + ast.logger = logger +} + +func (ast *Assertion) copy() *Assertion { + tokens := append([]string(nil), ast.Tokens...) + policy := make([][]string, len(ast.Policy)) + + for i, p := range ast.Policy { + policy[i] = append(policy[i], p...) + } + policyMap := make(map[string]int) + for k, v := range ast.PolicyMap { + policyMap[k] = v + } + + newAst := &Assertion{ + Key: ast.Key, + Value: ast.Value, + PolicyMap: policyMap, + Tokens: tokens, + Policy: policy, + FieldIndexMap: ast.FieldIndexMap, + } + + return newAst +} diff --git a/vendor/github.com/casbin/casbin/v2/model/function.go b/vendor/github.com/casbin/casbin/v2/model/function.go new file mode 100644 index 000000000..f1a8d0075 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/model/function.go @@ -0,0 +1,66 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sync" + + "github.com/casbin/casbin/v2/util" + "github.com/casbin/govaluate" +) + +// FunctionMap represents the collection of Function. 
+type FunctionMap struct { + fns *sync.Map +} + +// [string]govaluate.ExpressionFunction + +// AddFunction adds an expression function. +func (fm *FunctionMap) AddFunction(name string, function govaluate.ExpressionFunction) { + fm.fns.LoadOrStore(name, function) +} + +// LoadFunctionMap loads an initial function map. +func LoadFunctionMap() FunctionMap { + fm := &FunctionMap{} + fm.fns = &sync.Map{} + + fm.AddFunction("keyMatch", util.KeyMatchFunc) + fm.AddFunction("keyGet", util.KeyGetFunc) + fm.AddFunction("keyMatch2", util.KeyMatch2Func) + fm.AddFunction("keyGet2", util.KeyGet2Func) + fm.AddFunction("keyMatch3", util.KeyMatch3Func) + fm.AddFunction("keyGet3", util.KeyGet3Func) + fm.AddFunction("keyMatch4", util.KeyMatch4Func) + fm.AddFunction("keyMatch5", util.KeyMatch5Func) + fm.AddFunction("regexMatch", util.RegexMatchFunc) + fm.AddFunction("ipMatch", util.IPMatchFunc) + fm.AddFunction("globMatch", util.GlobMatchFunc) + + return *fm +} + +// GetFunctions return a map with all the functions. +func (fm *FunctionMap) GetFunctions() map[string]govaluate.ExpressionFunction { + ret := make(map[string]govaluate.ExpressionFunction) + + fm.fns.Range(func(k interface{}, v interface{}) bool { + ret[k.(string)] = v.(govaluate.ExpressionFunction) + return true + }) + + return ret +} diff --git a/vendor/github.com/casbin/casbin/v2/model/model.go b/vendor/github.com/casbin/casbin/v2/model/model.go new file mode 100644 index 000000000..bffb9deeb --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/model/model.go @@ -0,0 +1,438 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "container/list" + "errors" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/casbin/casbin/v2/config" + "github.com/casbin/casbin/v2/constant" + "github.com/casbin/casbin/v2/log" + "github.com/casbin/casbin/v2/util" +) + +// Model represents the whole access control model. +type Model map[string]AssertionMap + +// AssertionMap is the collection of assertions, can be "r", "p", "g", "e", "m". +type AssertionMap map[string]*Assertion + +const defaultDomain string = "" +const defaultSeparator = "::" + +var sectionNameMap = map[string]string{ + "r": "request_definition", + "p": "policy_definition", + "g": "role_definition", + "e": "policy_effect", + "m": "matchers", +} + +// Minimal required sections for a model to be valid. +var requiredSections = []string{"r", "p", "e", "m"} + +func loadAssertion(model Model, cfg config.ConfigInterface, sec string, key string) bool { + value := cfg.String(sectionNameMap[sec] + "::" + key) + return model.AddDef(sec, key, value) +} + +var paramsRegex = regexp.MustCompile(`\((.*?)\)`) + +// getParamsToken Get ParamsToken from Assertion.Value. +func getParamsToken(value string) []string { + paramsString := paramsRegex.FindString(value) + if paramsString == "" { + return nil + } + paramsString = strings.TrimSuffix(strings.TrimPrefix(paramsString, "("), ")") + return strings.Split(paramsString, ",") +} + +// AddDef adds an assertion to the model. 
+func (model Model) AddDef(sec string, key string, value string) bool { + if value == "" { + return false + } + + ast := Assertion{} + ast.Key = key + ast.Value = value + ast.PolicyMap = make(map[string]int) + ast.FieldIndexMap = make(map[string]int) + ast.setLogger(model.GetLogger()) + + if sec == "r" || sec == "p" { + ast.Tokens = strings.Split(ast.Value, ",") + for i := range ast.Tokens { + ast.Tokens[i] = key + "_" + strings.TrimSpace(ast.Tokens[i]) + } + } else if sec == "g" { + ast.ParamsTokens = getParamsToken(ast.Value) + ast.Tokens = strings.Split(ast.Value, ",") + ast.Tokens = ast.Tokens[:len(ast.Tokens)-len(ast.ParamsTokens)] + } else { + ast.Value = util.RemoveComments(util.EscapeAssertion(ast.Value)) + } + + if sec == "m" && strings.Contains(ast.Value, "in") { + ast.Value = strings.Replace(strings.Replace(ast.Value, "[", "(", -1), "]", ")", -1) + } + + _, ok := model[sec] + if !ok { + model[sec] = make(AssertionMap) + } + + model[sec][key] = &ast + return true +} + +func getKeySuffix(i int) string { + if i == 1 { + return "" + } + + return strconv.Itoa(i) +} + +func loadSection(model Model, cfg config.ConfigInterface, sec string) { + i := 1 + for { + if !loadAssertion(model, cfg, sec, sec+getKeySuffix(i)) { + break + } else { + i++ + } + } +} + +// SetLogger sets the model's logger. +func (model Model) SetLogger(logger log.Logger) { + for _, astMap := range model { + for _, ast := range astMap { + ast.logger = logger + } + } + model["logger"] = AssertionMap{"logger": &Assertion{logger: logger}} +} + +// GetLogger returns the model's logger. +func (model Model) GetLogger() log.Logger { + return model["logger"]["logger"].logger +} + +// NewModel creates an empty model. +func NewModel() Model { + m := make(Model) + m.SetLogger(&log.DefaultLogger{}) + + return m +} + +// NewModelFromFile creates a model from a .CONF file. 
+func NewModelFromFile(path string) (Model, error) { + m := NewModel() + + err := m.LoadModel(path) + if err != nil { + return nil, err + } + + return m, nil +} + +// NewModelFromString creates a model from a string which contains model text. +func NewModelFromString(text string) (Model, error) { + m := NewModel() + + err := m.LoadModelFromText(text) + if err != nil { + return nil, err + } + + return m, nil +} + +// LoadModel loads the model from model CONF file. +func (model Model) LoadModel(path string) error { + cfg, err := config.NewConfig(path) + if err != nil { + return err + } + + return model.loadModelFromConfig(cfg) +} + +// LoadModelFromText loads the model from the text. +func (model Model) LoadModelFromText(text string) error { + cfg, err := config.NewConfigFromText(text) + if err != nil { + return err + } + + return model.loadModelFromConfig(cfg) +} + +func (model Model) loadModelFromConfig(cfg config.ConfigInterface) error { + for s := range sectionNameMap { + loadSection(model, cfg, s) + } + ms := make([]string, 0) + for _, rs := range requiredSections { + if !model.hasSection(rs) { + ms = append(ms, sectionNameMap[rs]) + } + } + if len(ms) > 0 { + return fmt.Errorf("missing required sections: %s", strings.Join(ms, ",")) + } + return nil +} + +func (model Model) hasSection(sec string) bool { + section := model[sec] + return section != nil +} + +func (model Model) GetAssertion(sec string, ptype string) (*Assertion, error) { + if model[sec] == nil { + return nil, fmt.Errorf("missing required section %s", sec) + } + if model[sec][ptype] == nil { + return nil, fmt.Errorf("missing required definition %s in section %s", ptype, sec) + } + return model[sec][ptype], nil +} + +// PrintModel prints the model to the log. 
+func (model Model) PrintModel() { + if !model.GetLogger().IsEnabled() { + return + } + + var modelInfo [][]string + for k, v := range model { + if k == "logger" { + continue + } + + for i, j := range v { + modelInfo = append(modelInfo, []string{k, i, j.Value}) + } + } + + model.GetLogger().LogModel(modelInfo) +} + +func (model Model) SortPoliciesBySubjectHierarchy() error { + if model["e"]["e"].Value != constant.SubjectPriorityEffect { + return nil + } + g, err := model.GetAssertion("g", "g") + if err != nil { + return err + } + subIndex := 0 + for ptype, assertion := range model["p"] { + domainIndex, err := model.GetFieldIndex(ptype, constant.DomainIndex) + if err != nil { + domainIndex = -1 + } + policies := assertion.Policy + subjectHierarchyMap, err := getSubjectHierarchyMap(g.Policy) + if err != nil { + return err + } + sort.SliceStable(policies, func(i, j int) bool { + domain1, domain2 := defaultDomain, defaultDomain + if domainIndex != -1 { + domain1 = policies[i][domainIndex] + domain2 = policies[j][domainIndex] + } + name1, name2 := getNameWithDomain(domain1, policies[i][subIndex]), getNameWithDomain(domain2, policies[j][subIndex]) + p1 := subjectHierarchyMap[name1] + p2 := subjectHierarchyMap[name2] + return p1 > p2 + }) + for i, policy := range assertion.Policy { + assertion.PolicyMap[strings.Join(policy, ",")] = i + } + } + return nil +} + +func getSubjectHierarchyMap(policies [][]string) (map[string]int, error) { + subjectHierarchyMap := make(map[string]int) + // Tree structure of role + policyMap := make(map[string][]string) + for _, policy := range policies { + if len(policy) < 2 { + return nil, errors.New("policy g expect 2 more params") + } + domain := defaultDomain + if len(policy) != 2 { + domain = policy[2] + } + child := getNameWithDomain(domain, policy[0]) + parent := getNameWithDomain(domain, policy[1]) + policyMap[parent] = append(policyMap[parent], child) + if _, ok := subjectHierarchyMap[child]; !ok { + subjectHierarchyMap[child] = 0 + } 
+ if _, ok := subjectHierarchyMap[parent]; !ok { + subjectHierarchyMap[parent] = 0 + } + subjectHierarchyMap[child] = 1 + } + // Use queues for levelOrder + queue := list.New() + for k, v := range subjectHierarchyMap { + root := k + if v != 0 { + continue + } + lv := 0 + queue.PushBack(root) + for queue.Len() != 0 { + sz := queue.Len() + for i := 0; i < sz; i++ { + node := queue.Front() + queue.Remove(node) + nodeValue := node.Value.(string) + subjectHierarchyMap[nodeValue] = lv + if _, ok := policyMap[nodeValue]; ok { + for _, child := range policyMap[nodeValue] { + queue.PushBack(child) + } + } + } + lv++ + } + } + return subjectHierarchyMap, nil +} + +func getNameWithDomain(domain string, name string) string { + return domain + defaultSeparator + name +} + +func (model Model) SortPoliciesByPriority() error { + for ptype, assertion := range model["p"] { + priorityIndex, err := model.GetFieldIndex(ptype, constant.PriorityIndex) + if err != nil { + continue + } + policies := assertion.Policy + sort.SliceStable(policies, func(i, j int) bool { + p1, err := strconv.Atoi(policies[i][priorityIndex]) + if err != nil { + return true + } + p2, err := strconv.Atoi(policies[j][priorityIndex]) + if err != nil { + return true + } + return p1 < p2 + }) + for i, policy := range assertion.Policy { + assertion.PolicyMap[strings.Join(policy, ",")] = i + } + } + return nil +} + +var ( + pPattern = regexp.MustCompile("^p_") + rPattern = regexp.MustCompile("^r_") +) + +func (model Model) ToText() string { + tokenPatterns := make(map[string]string) + + for _, ptype := range []string{"r", "p"} { + for _, token := range model[ptype][ptype].Tokens { + tokenPatterns[token] = rPattern.ReplaceAllString(pPattern.ReplaceAllString(token, "p."), "r.") + } + } + if strings.Contains(model["e"]["e"].Value, "p_eft") { + tokenPatterns["p_eft"] = "p.eft" + } + s := strings.Builder{} + writeString := func(sec string) { + for ptype := range model[sec] { + value := model[sec][ptype].Value + for 
tokenPattern, newToken := range tokenPatterns { + value = strings.Replace(value, tokenPattern, newToken, -1) + } + s.WriteString(fmt.Sprintf("%s = %s\n", sec, value)) + } + } + s.WriteString("[request_definition]\n") + writeString("r") + s.WriteString("[policy_definition]\n") + writeString("p") + if _, ok := model["g"]; ok { + s.WriteString("[role_definition]\n") + for ptype := range model["g"] { + s.WriteString(fmt.Sprintf("%s = %s\n", ptype, model["g"][ptype].Value)) + } + } + s.WriteString("[policy_effect]\n") + writeString("e") + s.WriteString("[matchers]\n") + writeString("m") + return s.String() +} + +func (model Model) Copy() Model { + newModel := NewModel() + + for sec, m := range model { + newAstMap := make(AssertionMap) + for ptype, ast := range m { + newAstMap[ptype] = ast.copy() + } + newModel[sec] = newAstMap + } + + newModel.SetLogger(model.GetLogger()) + return newModel +} + +func (model Model) GetFieldIndex(ptype string, field string) (int, error) { + assertion := model["p"][ptype] + if index, ok := assertion.FieldIndexMap[field]; ok { + return index, nil + } + pattern := fmt.Sprintf("%s_"+field, ptype) + index := -1 + for i, token := range assertion.Tokens { + if token == pattern { + index = i + break + } + } + if index == -1 { + return index, fmt.Errorf(field + " index is not set, please use enforcer.SetFieldIndex() to set index") + } + assertion.FieldIndexMap[field] = index + return index, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/model/policy.go b/vendor/github.com/casbin/casbin/v2/model/policy.go new file mode 100644 index 000000000..875da0901 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/model/policy.go @@ -0,0 +1,482 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" + "strings" + + "github.com/casbin/casbin/v2/constant" + "github.com/casbin/casbin/v2/rbac" + "github.com/casbin/casbin/v2/util" +) + +type ( + PolicyOp int +) + +const ( + PolicyAdd PolicyOp = iota + PolicyRemove +) + +const DefaultSep = "," + +// BuildIncrementalRoleLinks provides incremental build the role inheritance relations. +func (model Model) BuildIncrementalRoleLinks(rmMap map[string]rbac.RoleManager, op PolicyOp, sec string, ptype string, rules [][]string) error { + if sec == "g" && rmMap[ptype] != nil { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return err + } + return model[sec][ptype].buildIncrementalRoleLinks(rmMap[ptype], op, rules) + } + return nil +} + +// BuildRoleLinks initializes the roles in RBAC. +func (model Model) BuildRoleLinks(rmMap map[string]rbac.RoleManager) error { + model.PrintPolicy() + for ptype, ast := range model["g"] { + if rm := rmMap[ptype]; rm != nil { + err := ast.buildRoleLinks(rm) + if err != nil { + return err + } + } + } + + return nil +} + +// BuildIncrementalConditionalRoleLinks provides incremental build the role inheritance relations. 
+func (model Model) BuildIncrementalConditionalRoleLinks(condRmMap map[string]rbac.ConditionalRoleManager, op PolicyOp, sec string, ptype string, rules [][]string) error { + if sec == "g" && condRmMap[ptype] != nil { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return err + } + return model[sec][ptype].buildIncrementalConditionalRoleLinks(condRmMap[ptype], op, rules) + } + return nil +} + +// BuildConditionalRoleLinks initializes the roles in RBAC. +func (model Model) BuildConditionalRoleLinks(condRmMap map[string]rbac.ConditionalRoleManager) error { + model.PrintPolicy() + for ptype, ast := range model["g"] { + if condRm := condRmMap[ptype]; condRm != nil { + err := ast.buildConditionalRoleLinks(condRm) + if err != nil { + return err + } + } + } + + return nil +} + +// PrintPolicy prints the policy to log. +func (model Model) PrintPolicy() { + if !model.GetLogger().IsEnabled() { + return + } + + policy := make(map[string][][]string) + + for key, ast := range model["p"] { + value, found := policy[key] + if found { + value = append(value, ast.Policy...) + policy[key] = value + } else { + policy[key] = ast.Policy + } + } + + for key, ast := range model["g"] { + value, found := policy[key] + if found { + value = append(value, ast.Policy...) + policy[key] = value + } else { + policy[key] = ast.Policy + } + } + + model.GetLogger().LogPolicy(policy) +} + +// ClearPolicy clears all current policy. +func (model Model) ClearPolicy() { + for _, ast := range model["p"] { + ast.Policy = nil + ast.PolicyMap = map[string]int{} + } + + for _, ast := range model["g"] { + ast.Policy = nil + ast.PolicyMap = map[string]int{} + } +} + +// GetPolicy gets all rules in a policy. +func (model Model) GetPolicy(sec string, ptype string) ([][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + return model[sec][ptype].Policy, nil +} + +// GetFilteredPolicy gets rules based on field filters from a policy. 
+func (model Model) GetFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) ([][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + res := [][]string{} + + for _, rule := range model[sec][ptype].Policy { + matched := true + for i, fieldValue := range fieldValues { + if fieldValue != "" && rule[fieldIndex+i] != fieldValue { + matched = false + break + } + } + + if matched { + res = append(res, rule) + } + } + + return res, nil +} + +// HasPolicyEx determines whether a model has the specified policy rule with error. +func (model Model) HasPolicyEx(sec string, ptype string, rule []string) (bool, error) { + assertion, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + switch sec { + case "p": + if len(rule) != len(assertion.Tokens) { + return false, fmt.Errorf( + "invalid policy rule size: expected %d, got %d, rule: %v", + len(model["p"][ptype].Tokens), + len(rule), + rule) + } + case "g": + if len(rule) < len(assertion.Tokens) { + return false, fmt.Errorf( + "invalid policy rule size: expected %d, got %d, rule: %v", + len(model["g"][ptype].Tokens), + len(rule), + rule) + } + } + return model.HasPolicy(sec, ptype, rule) +} + +// HasPolicy determines whether a model has the specified policy rule. +func (model Model) HasPolicy(sec string, ptype string, rule []string) (bool, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + _, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] + return ok, nil +} + +// HasPolicies determines whether a model has any of the specified policies. If one is found we return true. 
+func (model Model) HasPolicies(sec string, ptype string, rules [][]string) (bool, error) { + for i := 0; i < len(rules); i++ { + ok, err := model.HasPolicy(sec, ptype, rules[i]) + if err != nil { + return false, err + } + if ok { + return true, nil + } + } + + return false, nil +} + +// AddPolicy adds a policy rule to the model. +func (model Model) AddPolicy(sec string, ptype string, rule []string) error { + assertion, err := model.GetAssertion(sec, ptype) + if err != nil { + return err + } + assertion.Policy = append(assertion.Policy, rule) + assertion.PolicyMap[strings.Join(rule, DefaultSep)] = len(model[sec][ptype].Policy) - 1 + + hasPriority := false + if _, ok := assertion.FieldIndexMap[constant.PriorityIndex]; ok { + hasPriority = true + } + if sec == "p" && hasPriority { + if idxInsert, err := strconv.Atoi(rule[assertion.FieldIndexMap[constant.PriorityIndex]]); err == nil { + i := len(assertion.Policy) - 1 + for ; i > 0; i-- { + idx, err := strconv.Atoi(assertion.Policy[i-1][assertion.FieldIndexMap[constant.PriorityIndex]]) + if err != nil || idx <= idxInsert { + break + } + assertion.Policy[i] = assertion.Policy[i-1] + assertion.PolicyMap[strings.Join(assertion.Policy[i-1], DefaultSep)]++ + } + assertion.Policy[i] = rule + assertion.PolicyMap[strings.Join(rule, DefaultSep)] = i + } + } + return nil +} + +// AddPolicies adds policy rules to the model. +func (model Model) AddPolicies(sec string, ptype string, rules [][]string) error { + _, err := model.AddPoliciesWithAffected(sec, ptype, rules) + return err +} + +// AddPoliciesWithAffected adds policy rules to the model, and returns affected rules. 
+func (model Model) AddPoliciesWithAffected(sec string, ptype string, rules [][]string) ([][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + var affected [][]string + for _, rule := range rules { + hashKey := strings.Join(rule, DefaultSep) + _, ok := model[sec][ptype].PolicyMap[hashKey] + if ok { + continue + } + affected = append(affected, rule) + err = model.AddPolicy(sec, ptype, rule) + if err != nil { + return affected, err + } + } + return affected, err +} + +// RemovePolicy removes a policy rule from the model. +// Deprecated: Using AddPoliciesWithAffected instead. +func (model Model) RemovePolicy(sec string, ptype string, rule []string) (bool, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + index, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] + if !ok { + return false, err + } + + model[sec][ptype].Policy = append(model[sec][ptype].Policy[:index], model[sec][ptype].Policy[index+1:]...) + delete(model[sec][ptype].PolicyMap, strings.Join(rule, DefaultSep)) + for i := index; i < len(model[sec][ptype].Policy); i++ { + model[sec][ptype].PolicyMap[strings.Join(model[sec][ptype].Policy[i], DefaultSep)] = i + } + + return true, err +} + +// UpdatePolicy updates a policy rule from the model. +func (model Model) UpdatePolicy(sec string, ptype string, oldRule []string, newRule []string) (bool, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + oldPolicy := strings.Join(oldRule, DefaultSep) + index, ok := model[sec][ptype].PolicyMap[oldPolicy] + if !ok { + return false, nil + } + + model[sec][ptype].Policy[index] = newRule + delete(model[sec][ptype].PolicyMap, oldPolicy) + model[sec][ptype].PolicyMap[strings.Join(newRule, DefaultSep)] = index + + return true, nil +} + +// UpdatePolicies updates a policy rule from the model. 
+func (model Model) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) (bool, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, err + } + rollbackFlag := false + // index -> []{oldIndex, newIndex} + modifiedRuleIndex := make(map[int][]int) + // rollback + defer func() { + if rollbackFlag { + for index, oldNewIndex := range modifiedRuleIndex { + model[sec][ptype].Policy[index] = oldRules[oldNewIndex[0]] + oldPolicy := strings.Join(oldRules[oldNewIndex[0]], DefaultSep) + newPolicy := strings.Join(newRules[oldNewIndex[1]], DefaultSep) + delete(model[sec][ptype].PolicyMap, newPolicy) + model[sec][ptype].PolicyMap[oldPolicy] = index + } + } + }() + + newIndex := 0 + for oldIndex, oldRule := range oldRules { + oldPolicy := strings.Join(oldRule, DefaultSep) + index, ok := model[sec][ptype].PolicyMap[oldPolicy] + if !ok { + rollbackFlag = true + return false, nil + } + + model[sec][ptype].Policy[index] = newRules[newIndex] + delete(model[sec][ptype].PolicyMap, oldPolicy) + model[sec][ptype].PolicyMap[strings.Join(newRules[newIndex], DefaultSep)] = index + modifiedRuleIndex[index] = []int{oldIndex, newIndex} + newIndex++ + } + + return true, nil +} + +// RemovePolicies removes policy rules from the model. +func (model Model) RemovePolicies(sec string, ptype string, rules [][]string) (bool, error) { + affected, err := model.RemovePoliciesWithAffected(sec, ptype, rules) + return len(affected) != 0, err +} + +// RemovePoliciesWithAffected removes policy rules from the model, and returns affected rules. 
+func (model Model) RemovePoliciesWithAffected(sec string, ptype string, rules [][]string) ([][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + var affected [][]string + for _, rule := range rules { + index, ok := model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] + if !ok { + continue + } + + affected = append(affected, rule) + model[sec][ptype].Policy = append(model[sec][ptype].Policy[:index], model[sec][ptype].Policy[index+1:]...) + delete(model[sec][ptype].PolicyMap, strings.Join(rule, DefaultSep)) + for i := index; i < len(model[sec][ptype].Policy); i++ { + model[sec][ptype].PolicyMap[strings.Join(model[sec][ptype].Policy[i], DefaultSep)] = i + } + } + return affected, nil +} + +// RemoveFilteredPolicy removes policy rules based on field filters from the model. +func (model Model) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) (bool, [][]string, error) { + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return false, nil, err + } + var tmp [][]string + var effects [][]string + res := false + model[sec][ptype].PolicyMap = map[string]int{} + + for _, rule := range model[sec][ptype].Policy { + matched := true + for i, fieldValue := range fieldValues { + if fieldValue != "" && rule[fieldIndex+i] != fieldValue { + matched = false + break + } + } + + if matched { + effects = append(effects, rule) + } else { + tmp = append(tmp, rule) + model[sec][ptype].PolicyMap[strings.Join(rule, DefaultSep)] = len(tmp) - 1 + } + } + + if len(tmp) != len(model[sec][ptype].Policy) { + model[sec][ptype].Policy = tmp + res = true + } + + return res, effects, nil +} + +// GetValuesForFieldInPolicy gets all values for a field for all rules in a policy, duplicated values are removed. 
+func (model Model) GetValuesForFieldInPolicy(sec string, ptype string, fieldIndex int) ([]string, error) { + values := []string{} + + _, err := model.GetAssertion(sec, ptype) + if err != nil { + return nil, err + } + + for _, rule := range model[sec][ptype].Policy { + values = append(values, rule[fieldIndex]) + } + + util.ArrayRemoveDuplicates(&values) + + return values, nil +} + +// GetValuesForFieldInPolicyAllTypes gets all values for a field for all rules in a policy of all ptypes, duplicated values are removed. +func (model Model) GetValuesForFieldInPolicyAllTypes(sec string, fieldIndex int) ([]string, error) { + values := []string{} + + for ptype := range model[sec] { + v, err := model.GetValuesForFieldInPolicy(sec, ptype, fieldIndex) + if err != nil { + return nil, err + } + values = append(values, v...) + } + + util.ArrayRemoveDuplicates(&values) + + return values, nil +} + +// GetValuesForFieldInPolicyAllTypesByName gets all values for a field for all rules in a policy of all ptypes, duplicated values are removed. +func (model Model) GetValuesForFieldInPolicyAllTypesByName(sec string, field string) ([]string, error) { + values := []string{} + + for ptype := range model[sec] { + // GetFieldIndex will return (-1, err) if field is not found, ignore it + index, err := model.GetFieldIndex(ptype, field) + if err != nil { + continue + } + v, err := model.GetValuesForFieldInPolicy(sec, ptype, index) + if err != nil { + return nil, err + } + values = append(values, v...) + } + + util.ArrayRemoveDuplicates(&values) + + return values, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter.go b/vendor/github.com/casbin/casbin/v2/persist/adapter.go new file mode 100644 index 000000000..455871768 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/adapter.go @@ -0,0 +1,79 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import ( + "encoding/csv" + "strings" + + "github.com/casbin/casbin/v2/model" +) + +// LoadPolicyLine loads a text line as a policy rule to model. +func LoadPolicyLine(line string, m model.Model) error { + if line == "" || strings.HasPrefix(line, "#") { + return nil + } + + r := csv.NewReader(strings.NewReader(line)) + r.Comma = ',' + r.Comment = '#' + r.TrimLeadingSpace = true + + tokens, err := r.Read() + if err != nil { + return err + } + + return LoadPolicyArray(tokens, m) +} + +// LoadPolicyArray loads a policy rule to model. +func LoadPolicyArray(rule []string, m model.Model) error { + key := rule[0] + sec := key[:1] + ok, err := m.HasPolicyEx(sec, key, rule[1:]) + if err != nil { + return err + } + if ok { + return nil // skip duplicated policy + } + + err = m.AddPolicy(sec, key, rule[1:]) + if err != nil { + return err + } + + return nil +} + +// Adapter is the interface for Casbin adapters. +type Adapter interface { + // LoadPolicy loads all policy rules from the storage. + LoadPolicy(model model.Model) error + // SavePolicy saves all policy rules to the storage. + SavePolicy(model model.Model) error + + // AddPolicy adds a policy rule to the storage. + // This is part of the Auto-Save feature. + AddPolicy(sec string, ptype string, rule []string) error + // RemovePolicy removes a policy rule from the storage. + // This is part of the Auto-Save feature. 
+ RemovePolicy(sec string, ptype string, rule []string) error + // RemoveFilteredPolicy removes policy rules that match the filter from the storage. + // This is part of the Auto-Save feature. + RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go new file mode 100644 index 000000000..bda78a7e2 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go @@ -0,0 +1,39 @@ +// Copyright 2023 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import ( + "context" + + "github.com/casbin/casbin/v2/model" +) + +// ContextAdapter provides a context-aware interface for Casbin adapters. +type ContextAdapter interface { + // LoadPolicyCtx loads all policy rules from the storage with context. + LoadPolicyCtx(ctx context.Context, model model.Model) error + // SavePolicyCtx saves all policy rules to the storage with context. + SavePolicyCtx(ctx context.Context, model model.Model) error + + // AddPolicyCtx adds a policy rule to the storage with context. + // This is part of the Auto-Save feature. + AddPolicyCtx(ctx context.Context, sec string, ptype string, rule []string) error + // RemovePolicyCtx removes a policy rule from the storage with context. + // This is part of the Auto-Save feature. 
+ RemovePolicyCtx(ctx context.Context, sec string, ptype string, rule []string) error + // RemoveFilteredPolicyCtx removes policy rules that match the filter from the storage with context. + // This is part of the Auto-Save feature. + RemoveFilteredPolicyCtx(ctx context.Context, sec string, ptype string, fieldIndex int, fieldValues ...string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go new file mode 100644 index 000000000..82c9a0e7c --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered.go @@ -0,0 +1,29 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import ( + "github.com/casbin/casbin/v2/model" +) + +// FilteredAdapter is the interface for Casbin adapters supporting filtered policies. +type FilteredAdapter interface { + Adapter + + // LoadFilteredPolicy loads only policy rules that match the filter. + LoadFilteredPolicy(model model.Model, filter interface{}) error + // IsFiltered returns true if the loaded policy has been filtered. 
+ IsFiltered() bool +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go new file mode 100644 index 000000000..7893ce1bd --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/adapter_filtered_context.go @@ -0,0 +1,31 @@ +// Copyright 2024 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import ( + "context" + + "github.com/casbin/casbin/v2/model" +) + +// ContextFilteredAdapter is the context-aware interface for Casbin adapters supporting filtered policies. +type ContextFilteredAdapter interface { + ContextAdapter + + // LoadFilteredPolicyCtx loads only policy rules that match the filter. + LoadFilteredPolicyCtx(ctx context.Context, model model.Model, filter interface{}) error + // IsFilteredCtx returns true if the loaded policy has been filtered. + IsFilteredCtx(ctx context.Context) bool +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go new file mode 100644 index 000000000..56ec415fe --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter.go @@ -0,0 +1,26 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +// BatchAdapter is the interface for Casbin adapters with multiple add and remove policy functions. +type BatchAdapter interface { + Adapter + // AddPolicies adds policy rules to the storage. + // This is part of the Auto-Save feature. + AddPolicies(sec string, ptype string, rules [][]string) error + // RemovePolicies removes policy rules from the storage. + // This is part of the Auto-Save feature. + RemovePolicies(sec string, ptype string, rules [][]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go new file mode 100644 index 000000000..741c184d6 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/batch_adapter_context.go @@ -0,0 +1,29 @@ +// Copyright 2024 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persist + +import "context" + +// ContextBatchAdapter is the context-aware interface for Casbin adapters with multiple add and remove policy functions. +type ContextBatchAdapter interface { + ContextAdapter + + // AddPoliciesCtx adds policy rules to the storage. + // This is part of the Auto-Save feature. + AddPoliciesCtx(ctx context.Context, sec string, ptype string, rules [][]string) error + // RemovePoliciesCtx removes policy rules from the storage. + // This is part of the Auto-Save feature. + RemovePoliciesCtx(ctx context.Context, sec string, ptype string, rules [][]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go b/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go new file mode 100644 index 000000000..08447b83c --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/cache/cache.go @@ -0,0 +1,39 @@ +// Copyright 2021 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +import "errors" + +var ErrNoSuchKey = errors.New("there's no such key existing in cache") + +type Cache interface { + // Set puts key and value into cache. + // First parameter for extra should be time.Time object denoting expected survival time. + // If survival time equals 0 or less, the key will always be survival. 
+ Set(key string, value bool, extra ...interface{}) error + + // Get returns result for key, + // If there's no such key existing in cache, + // ErrNoSuchKey will be returned. + Get(key string) (bool, error) + + // Delete will remove the specific key in cache. + // If there's no such key existing in cache, + // ErrNoSuchKey will be returned. + Delete(key string) error + + // Clear deletes all the items stored in cache. + Clear() error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go b/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go new file mode 100644 index 000000000..816e12dcc --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/cache/cache_sync.go @@ -0,0 +1,86 @@ +// Copyright 2021 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cache + +import ( + "sync" + "time" +) + +type SyncCache struct { + cache DefaultCache + sync.RWMutex +} + +func (c *SyncCache) Set(key string, value bool, extra ...interface{}) error { + ttl := time.Duration(-1) + if len(extra) > 0 { + ttl = extra[0].(time.Duration) + } + c.Lock() + defer c.Unlock() + c.cache[key] = cacheItem{ + value: value, + expiresAt: time.Now().Add(ttl), + ttl: ttl, + } + return nil +} + +func (c *SyncCache) Get(key string) (bool, error) { + c.RLock() + res, ok := c.cache[key] + c.RUnlock() + if !ok { + return false, ErrNoSuchKey + } else { + if res.ttl > 0 && time.Now().After(res.expiresAt) { + c.Lock() + defer c.Unlock() + delete(c.cache, key) + return false, ErrNoSuchKey + } + return res.value, nil + } +} + +func (c *SyncCache) Delete(key string) error { + c.RLock() + _, ok := c.cache[key] + c.RUnlock() + if !ok { + return ErrNoSuchKey + } else { + c.Lock() + defer c.Unlock() + delete(c.cache, key) + return nil + } +} + +func (c *SyncCache) Clear() error { + c.Lock() + c.cache = make(DefaultCache) + c.Unlock() + return nil +} + +func NewSyncCache() (Cache, error) { + cache := SyncCache{ + make(DefaultCache), + sync.RWMutex{}, + } + return &cache, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go b/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go new file mode 100644 index 000000000..9108e7d64 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/cache/default-cache.go @@ -0,0 +1,69 @@ +// Copyright 2021 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cache + +import "time" + +type cacheItem struct { + value bool + expiresAt time.Time + ttl time.Duration +} + +type DefaultCache map[string]cacheItem + +func (c *DefaultCache) Set(key string, value bool, extra ...interface{}) error { + ttl := time.Duration(-1) + if len(extra) > 0 { + ttl = extra[0].(time.Duration) + } + (*c)[key] = cacheItem{ + value: value, + expiresAt: time.Now().Add(ttl), + ttl: ttl, + } + return nil +} + +func (c *DefaultCache) Get(key string) (bool, error) { + if res, ok := (*c)[key]; !ok { + return false, ErrNoSuchKey + } else { + if res.ttl > 0 && time.Now().After(res.expiresAt) { + delete(*c, key) + return false, ErrNoSuchKey + } + return res.value, nil + } +} + +func (c *DefaultCache) Delete(key string) error { + if _, ok := (*c)[key]; !ok { + return ErrNoSuchKey + } else { + delete(*c, key) + return nil + } +} + +func (c *DefaultCache) Clear() error { + *c = make(DefaultCache) + return nil +} + +func NewDefaultCache() (Cache, error) { + cache := make(DefaultCache) + return &cache, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go b/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go new file mode 100644 index 000000000..ceaed8385 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/dispatcher.go @@ -0,0 +1,33 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +// Dispatcher is the interface for Casbin dispatcher. +type Dispatcher interface { + // AddPolicies adds policies rule to all instance. + AddPolicies(sec string, ptype string, rules [][]string) error + // RemovePolicies removes policies rule from all instance. + RemovePolicies(sec string, ptype string, rules [][]string) error + // RemoveFilteredPolicy removes policy rules that match the filter from all instance. + RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error + // ClearPolicy clears all current policy in all instances + ClearPolicy() error + // UpdatePolicy updates policy rule from all instance. + UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error + // UpdatePolicies updates some policy rules from all instance + UpdatePolicies(sec string, ptype string, oldrules, newRules [][]string) error + // UpdateFilteredPolicies deletes old rules and adds new rules. + UpdateFilteredPolicies(sec string, ptype string, oldRules [][]string, newRules [][]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go new file mode 100644 index 000000000..c68f0eaa4 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter.go @@ -0,0 +1,149 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileadapter + +import ( + "bufio" + "bytes" + "errors" + "os" + "strings" + + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" + "github.com/casbin/casbin/v2/util" +) + +// Adapter is the file adapter for Casbin. +// It can load policy from file or save policy to file. +type Adapter struct { + filePath string +} + +func (a *Adapter) UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error { + return errors.New("not implemented") +} + +func (a *Adapter) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error { + return errors.New("not implemented") +} + +func (a *Adapter) UpdateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) { + return nil, errors.New("not implemented") +} + +// NewAdapter is the constructor for Adapter. +func NewAdapter(filePath string) *Adapter { + return &Adapter{filePath: filePath} +} + +// LoadPolicy loads all policy rules from the storage. +func (a *Adapter) LoadPolicy(model model.Model) error { + if a.filePath == "" { + return errors.New("invalid file path, file path cannot be empty") + } + + return a.loadPolicyFile(model, persist.LoadPolicyLine) +} + +// SavePolicy saves all policy rules to the storage. 
+func (a *Adapter) SavePolicy(model model.Model) error { + if a.filePath == "" { + return errors.New("invalid file path, file path cannot be empty") + } + + var tmp bytes.Buffer + + for ptype, ast := range model["p"] { + for _, rule := range ast.Policy { + tmp.WriteString(ptype + ", ") + tmp.WriteString(util.ArrayToString(rule)) + tmp.WriteString("\n") + } + } + + for ptype, ast := range model["g"] { + for _, rule := range ast.Policy { + tmp.WriteString(ptype + ", ") + tmp.WriteString(util.ArrayToString(rule)) + tmp.WriteString("\n") + } + } + + return a.savePolicyFile(strings.TrimRight(tmp.String(), "\n")) +} + +func (a *Adapter) loadPolicyFile(model model.Model, handler func(string, model.Model) error) error { + f, err := os.Open(a.filePath) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + err = handler(line, model) + if err != nil { + return err + } + } + return scanner.Err() +} + +func (a *Adapter) savePolicyFile(text string) error { + f, err := os.Create(a.filePath) + if err != nil { + return err + } + w := bufio.NewWriter(f) + + _, err = w.WriteString(text) + if err != nil { + return err + } + + err = w.Flush() + if err != nil { + return err + } + + return f.Close() +} + +// AddPolicy adds a policy rule to the storage. +func (a *Adapter) AddPolicy(sec string, ptype string, rule []string) error { + return errors.New("not implemented") +} + +// AddPolicies adds policy rules to the storage. +func (a *Adapter) AddPolicies(sec string, ptype string, rules [][]string) error { + return errors.New("not implemented") +} + +// RemovePolicy removes a policy rule from the storage. +func (a *Adapter) RemovePolicy(sec string, ptype string, rule []string) error { + return errors.New("not implemented") +} + +// RemovePolicies removes policy rules from the storage. 
+func (a *Adapter) RemovePolicies(sec string, ptype string, rules [][]string) error { + return errors.New("not implemented") +} + +// RemoveFilteredPolicy removes policy rules that match the filter from the storage. +func (a *Adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { + return errors.New("not implemented") +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go new file mode 100644 index 000000000..1a074c9a9 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_filtered.go @@ -0,0 +1,156 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fileadapter + +import ( + "bufio" + "errors" + "os" + "strings" + + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" +) + +// FilteredAdapter is the filtered file adapter for Casbin. It can load policy +// from file or save policy to file and supports loading of filtered policies. +type FilteredAdapter struct { + *Adapter + filtered bool +} + +// Filter defines the filtering rules for a FilteredAdapter's policy. Empty values +// are ignored, but all others must match the filter. 
+type Filter struct { + P []string + G []string + G1 []string + G2 []string + G3 []string + G4 []string + G5 []string +} + +// NewFilteredAdapter is the constructor for FilteredAdapter. +func NewFilteredAdapter(filePath string) *FilteredAdapter { + a := FilteredAdapter{} + a.filtered = true + a.Adapter = NewAdapter(filePath) + return &a +} + +// LoadPolicy loads all policy rules from the storage. +func (a *FilteredAdapter) LoadPolicy(model model.Model) error { + a.filtered = false + return a.Adapter.LoadPolicy(model) +} + +// LoadFilteredPolicy loads only policy rules that match the filter. +func (a *FilteredAdapter) LoadFilteredPolicy(model model.Model, filter interface{}) error { + if filter == nil { + return a.LoadPolicy(model) + } + if a.filePath == "" { + return errors.New("invalid file path, file path cannot be empty") + } + + filterValue, ok := filter.(*Filter) + if !ok { + return errors.New("invalid filter type") + } + err := a.loadFilteredPolicyFile(model, filterValue, persist.LoadPolicyLine) + if err == nil { + a.filtered = true + } + return err +} + +func (a *FilteredAdapter) loadFilteredPolicyFile(model model.Model, filter *Filter, handler func(string, model.Model) error) error { + f, err := os.Open(a.filePath) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + + if filterLine(line, filter) { + continue + } + + err = handler(line, model) + if err != nil { + return err + } + } + return scanner.Err() +} + +// IsFiltered returns true if the loaded policy has been filtered. +func (a *FilteredAdapter) IsFiltered() bool { + return a.filtered +} + +// SavePolicy saves all policy rules to the storage. 
+func (a *FilteredAdapter) SavePolicy(model model.Model) error { + if a.filtered { + return errors.New("cannot save a filtered policy") + } + return a.Adapter.SavePolicy(model) +} + +func filterLine(line string, filter *Filter) bool { + if filter == nil { + return false + } + p := strings.Split(line, ",") + if len(p) == 0 { + return true + } + var filterSlice []string + switch strings.TrimSpace(p[0]) { + case "p": + filterSlice = filter.P + case "g": + filterSlice = filter.G + case "g1": + filterSlice = filter.G1 + case "g2": + filterSlice = filter.G2 + case "g3": + filterSlice = filter.G3 + case "g4": + filterSlice = filter.G4 + case "g5": + filterSlice = filter.G5 + } + return filterWords(p, filterSlice) +} + +func filterWords(line []string, filter []string) bool { + if len(line) < len(filter)+1 { + return true + } + var skipLine bool + for i, v := range filter { + if len(v) > 0 && strings.TrimSpace(v) != strings.TrimSpace(line[i+1]) { + skipLine = true + break + } + } + return skipLine +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go new file mode 100644 index 000000000..fcc5f8218 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/file-adapter/adapter_mock.go @@ -0,0 +1,122 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fileadapter + +import ( + "bufio" + "errors" + "io" + "os" + "strings" + + "github.com/casbin/casbin/v2/model" + "github.com/casbin/casbin/v2/persist" +) + +// AdapterMock is the file adapter for Casbin. +// It can load policy from file or save policy to file. +type AdapterMock struct { + filePath string + errorValue string +} + +// NewAdapterMock is the constructor for AdapterMock. +func NewAdapterMock(filePath string) *AdapterMock { + a := AdapterMock{} + a.filePath = filePath + return &a +} + +// LoadPolicy loads all policy rules from the storage. +func (a *AdapterMock) LoadPolicy(model model.Model) error { + err := a.loadPolicyFile(model, persist.LoadPolicyLine) + return err +} + +// SavePolicy saves all policy rules to the storage. +func (a *AdapterMock) SavePolicy(model model.Model) error { + return nil +} + +func (a *AdapterMock) loadPolicyFile(model model.Model, handler func(string, model.Model) error) error { + f, err := os.Open(a.filePath) + if err != nil { + return err + } + defer f.Close() + + buf := bufio.NewReader(f) + for { + line, err := buf.ReadString('\n') + line = strings.TrimSpace(line) + if err2 := handler(line, model); err2 != nil { + return err2 + } + if err != nil { + if err == io.EOF { + return nil + } + return err + } + } +} + +// SetMockErr sets string to be returned by of the mock during testing. +func (a *AdapterMock) SetMockErr(errorToSet string) { + a.errorValue = errorToSet +} + +// GetMockErr returns a mock error or nil. +func (a *AdapterMock) GetMockErr() error { + var returnError error + if a.errorValue != "" { + returnError = errors.New(a.errorValue) + } + return returnError +} + +// AddPolicy adds a policy rule to the storage. +func (a *AdapterMock) AddPolicy(sec string, ptype string, rule []string) error { + return a.GetMockErr() +} + +// AddPolicies removes policy rules from the storage. 
+func (a *AdapterMock) AddPolicies(sec string, ptype string, rules [][]string) error { + return a.GetMockErr() +} + +// RemovePolicy removes a policy rule from the storage. +func (a *AdapterMock) RemovePolicy(sec string, ptype string, rule []string) error { + return a.GetMockErr() +} + +// RemovePolicies removes policy rules from the storage. +func (a *AdapterMock) RemovePolicies(sec string, ptype string, rules [][]string) error { + return a.GetMockErr() +} + +// UpdatePolicy removes a policy rule from the storage. +func (a *AdapterMock) UpdatePolicy(sec string, ptype string, oldRule, newPolicy []string) error { + return a.GetMockErr() +} + +func (a *AdapterMock) UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error { + return a.GetMockErr() +} + +// RemoveFilteredPolicy removes policy rules that match the filter from the storage. +func (a *AdapterMock) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error { + return a.GetMockErr() +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go b/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go new file mode 100644 index 000000000..fe9204afd --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/update_adapter.go @@ -0,0 +1,27 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package persist + +// UpdatableAdapter is the interface for Casbin adapters with add update policy function. +type UpdatableAdapter interface { + Adapter + // UpdatePolicy updates a policy rule from storage. + // This is part of the Auto-Save feature. + UpdatePolicy(sec string, ptype string, oldRule, newRule []string) error + // UpdatePolicies updates some policy rules to storage, like db, redis. + UpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error + // UpdateFilteredPolicies deletes old rules and adds new rules. + UpdateFilteredPolicies(sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go new file mode 100644 index 000000000..55b8ba9df --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/update_adapter_context.go @@ -0,0 +1,30 @@ +// Copyright 2024 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import "context" + +// ContextUpdatableAdapter is the context-aware interface for Casbin adapters with add update policy function. +type ContextUpdatableAdapter interface { + ContextAdapter + + // UpdatePolicyCtx updates a policy rule from storage. + // This is part of the Auto-Save feature. 
+ UpdatePolicyCtx(ctx context.Context, sec string, ptype string, oldRule, newRule []string) error + // UpdatePoliciesCtx updates some policy rules to storage, like db, redis. + UpdatePoliciesCtx(ctx context.Context, sec string, ptype string, oldRules, newRules [][]string) error + // UpdateFilteredPoliciesCtx deletes old rules and adds new rules. + UpdateFilteredPoliciesCtx(ctx context.Context, sec string, ptype string, newRules [][]string, fieldIndex int, fieldValues ...string) ([][]string, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher.go b/vendor/github.com/casbin/casbin/v2/persist/watcher.go new file mode 100644 index 000000000..0d843606b --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/watcher.go @@ -0,0 +1,29 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +// Watcher is the interface for Casbin watchers. +type Watcher interface { + // SetUpdateCallback sets the callback function that the watcher will call + // when the policy in DB has been changed by other instances. + // A classic callback is Enforcer.LoadPolicy(). + SetUpdateCallback(func(string)) error + // Update calls the update callback of other instances to synchronize their policy. + // It is usually called after changing the policy in DB, like Enforcer.SavePolicy(), + // Enforcer.AddPolicy(), Enforcer.RemovePolicy(), etc. 
+ Update() error + // Close stops and releases the watcher, the callback function will not be called any more. + Close() +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go b/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go new file mode 100644 index 000000000..1c6f4299c --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/watcher_ex.go @@ -0,0 +1,40 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +import "github.com/casbin/casbin/v2/model" + +// WatcherEx is the strengthened Casbin watchers. +type WatcherEx interface { + Watcher + // UpdateForAddPolicy calls the update callback of other instances to synchronize their policy. + // It is called after Enforcer.AddPolicy() + UpdateForAddPolicy(sec, ptype string, params ...string) error + // UpdateForRemovePolicy calls the update callback of other instances to synchronize their policy. + // It is called after Enforcer.RemovePolicy() + UpdateForRemovePolicy(sec, ptype string, params ...string) error + // UpdateForRemoveFilteredPolicy calls the update callback of other instances to synchronize their policy. + // It is called after Enforcer.RemoveFilteredNamedGroupingPolicy() + UpdateForRemoveFilteredPolicy(sec, ptype string, fieldIndex int, fieldValues ...string) error + // UpdateForSavePolicy calls the update callback of other instances to synchronize their policy. 
+ // It is called after Enforcer.RemoveFilteredNamedGroupingPolicy() + UpdateForSavePolicy(model model.Model) error + // UpdateForAddPolicies calls the update callback of other instances to synchronize their policy. + // It is called after Enforcer.AddPolicies() + UpdateForAddPolicies(sec string, ptype string, rules ...[]string) error + // UpdateForRemovePolicies calls the update callback of other instances to synchronize their policy. + // It is called after Enforcer.RemovePolicies() + UpdateForRemovePolicies(sec string, ptype string, rules ...[]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go b/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go new file mode 100644 index 000000000..694123c46 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/persist/watcher_update.go @@ -0,0 +1,26 @@ +// Copyright 2020 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package persist + +// UpdatableWatcher is strengthened for Casbin watchers. +type UpdatableWatcher interface { + Watcher + // UpdateForUpdatePolicy calls the update callback of other instances to synchronize their policy. + // It is called after Enforcer.UpdatePolicy() + UpdateForUpdatePolicy(sec string, ptype string, oldRule, newRule []string) error + // UpdateForUpdatePolicies calls the update callback of other instances to synchronize their policy. 
+ // It is called after Enforcer.UpdatePolicies() + UpdateForUpdatePolicies(sec string, ptype string, oldRules, newRules [][]string) error +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go new file mode 100644 index 000000000..dcaa37f76 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go @@ -0,0 +1,46 @@ +// Copyright 2023 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rbac + +import "context" + +// ContextRoleManager provides a context-aware interface to define the operations for managing roles. +// Prefer this over RoleManager interface for context propagation, which is useful for things like handling +// request timeouts. +type ContextRoleManager interface { + RoleManager + + // ClearCtx clears all stored data and resets the role manager to the initial state with context. + ClearCtx(ctx context.Context) error + // AddLinkCtx adds the inheritance link between two roles. role: name1 and role: name2 with context. + // domain is a prefix to the roles (can be used for other purposes). + AddLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) error + // DeleteLinkCtx deletes the inheritance link between two roles. role: name1 and role: name2 with context. + // domain is a prefix to the roles (can be used for other purposes). 
+ DeleteLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) error + // HasLinkCtx determines whether a link exists between two roles. role: name1 inherits role: name2 with context. + // domain is a prefix to the roles (can be used for other purposes). + HasLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) (bool, error) + // GetRolesCtx gets the roles that a user inherits with context. + // domain is a prefix to the roles (can be used for other purposes). + GetRolesCtx(ctx context.Context, name string, domain ...string) ([]string, error) + // GetUsersCtx gets the users that inherits a role with context. + // domain is a prefix to the users (can be used for other purposes). + GetUsersCtx(ctx context.Context, name string, domain ...string) ([]string, error) + // GetDomainsCtx gets domains that a user has with context. + GetDomainsCtx(ctx context.Context, name string) ([]string, error) + // GetAllDomainsCtx gets all domains with context. + GetAllDomainsCtx(ctx context.Context) ([]string, error) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go new file mode 100644 index 000000000..a6ae8693c --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go @@ -0,0 +1,1014 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package defaultrolemanager + +import ( + "fmt" + "strings" + "sync" + + "github.com/casbin/casbin/v2/log" + "github.com/casbin/casbin/v2/rbac" + "github.com/casbin/casbin/v2/util" +) + +const defaultDomain string = "" + +// Role represents the data structure for a role in RBAC. +type Role struct { + name string + roles *sync.Map + users *sync.Map + matched *sync.Map + matchedBy *sync.Map + linkConditionFuncMap *sync.Map + linkConditionFuncParamsMap *sync.Map +} + +func newRole(name string) *Role { + r := Role{} + r.name = name + r.roles = &sync.Map{} + r.users = &sync.Map{} + r.matched = &sync.Map{} + r.matchedBy = &sync.Map{} + r.linkConditionFuncMap = &sync.Map{} + r.linkConditionFuncParamsMap = &sync.Map{} + return &r +} + +func (r *Role) addRole(role *Role) { + r.roles.Store(role.name, role) + role.addUser(r) +} + +func (r *Role) removeRole(role *Role) { + r.roles.Delete(role.name) + role.removeUser(r) +} + +// should only be called inside addRole. +func (r *Role) addUser(user *Role) { + r.users.Store(user.name, user) +} + +// should only be called inside removeRole. 
+func (r *Role) removeUser(user *Role) { + r.users.Delete(user.name) +} + +func (r *Role) addMatch(role *Role) { + r.matched.Store(role.name, role) + role.matchedBy.Store(r.name, r) +} + +func (r *Role) removeMatch(role *Role) { + r.matched.Delete(role.name) + role.matchedBy.Delete(r.name) +} + +func (r *Role) removeMatches() { + r.matched.Range(func(key, value interface{}) bool { + r.removeMatch(value.(*Role)) + return true + }) + r.matchedBy.Range(func(key, value interface{}) bool { + value.(*Role).removeMatch(r) + return true + }) +} + +func (r *Role) rangeRoles(fn func(key, value interface{}) bool) { + r.roles.Range(fn) + r.roles.Range(func(key, value interface{}) bool { + role := value.(*Role) + role.matched.Range(fn) + return true + }) + r.matchedBy.Range(func(key, value interface{}) bool { + role := value.(*Role) + role.roles.Range(fn) + return true + }) +} + +func (r *Role) rangeUsers(fn func(key, value interface{}) bool) { + r.users.Range(fn) + r.users.Range(func(key, value interface{}) bool { + role := value.(*Role) + role.matched.Range(fn) + return true + }) + r.matchedBy.Range(func(key, value interface{}) bool { + role := value.(*Role) + role.users.Range(fn) + return true + }) +} + +func (r *Role) toString() string { + roles := r.getRoles() + + if len(roles) == 0 { + return "" + } + + var sb strings.Builder + sb.WriteString(r.name) + sb.WriteString(" < ") + if len(roles) != 1 { + sb.WriteString("(") + } + + for i, role := range roles { + if i == 0 { + sb.WriteString(role) + } else { + sb.WriteString(", ") + sb.WriteString(role) + } + } + + if len(roles) != 1 { + sb.WriteString(")") + } + + return sb.String() +} + +func (r *Role) getRoles() []string { + var names []string + r.rangeRoles(func(key, value interface{}) bool { + names = append(names, key.(string)) + return true + }) + return util.RemoveDuplicateElement(names) +} + +func (r *Role) getUsers() []string { + var names []string + r.rangeUsers(func(key, value interface{}) bool { + names = 
append(names, key.(string)) + return true + }) + return names +} + +type linkConditionFuncKey struct { + roleName string + domainName string +} + +func (r *Role) addLinkConditionFunc(role *Role, domain string, fn rbac.LinkConditionFunc) { + r.linkConditionFuncMap.Store(linkConditionFuncKey{role.name, domain}, fn) +} + +func (r *Role) getLinkConditionFunc(role *Role, domain string) (rbac.LinkConditionFunc, bool) { + fn, ok := r.linkConditionFuncMap.Load(linkConditionFuncKey{role.name, domain}) + if fn == nil { + return nil, ok + } + return fn.(rbac.LinkConditionFunc), ok +} + +func (r *Role) setLinkConditionFuncParams(role *Role, domain string, params ...string) { + r.linkConditionFuncParamsMap.Store(linkConditionFuncKey{role.name, domain}, params) +} + +func (r *Role) getLinkConditionFuncParams(role *Role, domain string) ([]string, bool) { + params, ok := r.linkConditionFuncParamsMap.Load(linkConditionFuncKey{role.name, domain}) + if params == nil { + return nil, ok + } + return params.([]string), ok +} + +// RoleManagerImpl provides a default implementation for the RoleManager interface. +type RoleManagerImpl struct { + allRoles *sync.Map + maxHierarchyLevel int + matchingFunc rbac.MatchingFunc + domainMatchingFunc rbac.MatchingFunc + logger log.Logger + matchingFuncCache *util.SyncLRUCache +} + +// NewRoleManagerImpl is the constructor for creating an instance of the +// default RoleManager implementation. +func NewRoleManagerImpl(maxHierarchyLevel int) *RoleManagerImpl { + rm := RoleManagerImpl{} + _ = rm.Clear() // init allRoles and matchingFuncCache + rm.maxHierarchyLevel = maxHierarchyLevel + rm.SetLogger(&log.DefaultLogger{}) + return &rm +} + +// use this constructor to avoid rebuild of AddMatchingFunc. +func newRoleManagerWithMatchingFunc(maxHierarchyLevel int, fn rbac.MatchingFunc) *RoleManagerImpl { + rm := NewRoleManagerImpl(maxHierarchyLevel) + rm.matchingFunc = fn + return rm +} + +// rebuilds role cache. 
+func (rm *RoleManagerImpl) rebuild() { + roles := rm.allRoles + _ = rm.Clear() + rangeLinks(roles, func(name1, name2 string, domain ...string) bool { + _ = rm.AddLink(name1, name2, domain...) + return true + }) +} + +func (rm *RoleManagerImpl) Match(str string, pattern string) bool { + if str == pattern { + return true + } + + if rm.matchingFunc != nil { + return rm.matchingFunc(str, pattern) + } else { + return false + } +} + +func (rm *RoleManagerImpl) rangeMatchingRoles(name string, isPattern bool, fn func(role *Role) bool) { + rm.allRoles.Range(func(key, value interface{}) bool { + name2 := key.(string) + if isPattern && name != name2 && rm.Match(name2, name) { + fn(value.(*Role)) + } else if !isPattern && name != name2 && rm.Match(name, name2) { + fn(value.(*Role)) + } + return true + }) +} + +func (rm *RoleManagerImpl) load(name interface{}) (value *Role, ok bool) { + if r, ok := rm.allRoles.Load(name); ok { + return r.(*Role), true + } + return nil, false +} + +// loads or creates a role. +func (rm *RoleManagerImpl) getRole(name string) (r *Role, created bool) { + var role *Role + var ok bool + + if role, ok = rm.load(name); !ok { + role = newRole(name) + rm.allRoles.Store(name, role) + + if rm.matchingFunc != nil { + rm.rangeMatchingRoles(name, false, func(r *Role) bool { + r.addMatch(role) + return true + }) + + rm.rangeMatchingRoles(name, true, func(r *Role) bool { + role.addMatch(r) + return true + }) + } + } + + return role, !ok +} + +func loadAndDelete(m *sync.Map, name string) (value interface{}, loaded bool) { + value, loaded = m.Load(name) + if loaded { + m.Delete(name) + } + return value, loaded +} + +func (rm *RoleManagerImpl) removeRole(name string) { + if role, ok := loadAndDelete(rm.allRoles, name); ok { + role.(*Role).removeMatches() + } +} + +// AddMatchingFunc support use pattern in g. 
+func (rm *RoleManagerImpl) AddMatchingFunc(name string, fn rbac.MatchingFunc) { + rm.matchingFunc = fn + rm.rebuild() +} + +// AddDomainMatchingFunc support use domain pattern in g. +func (rm *RoleManagerImpl) AddDomainMatchingFunc(name string, fn rbac.MatchingFunc) { + rm.domainMatchingFunc = fn +} + +// SetLogger sets role manager's logger. +func (rm *RoleManagerImpl) SetLogger(logger log.Logger) { + rm.logger = logger +} + +// Clear clears all stored data and resets the role manager to the initial state. +func (rm *RoleManagerImpl) Clear() error { + rm.matchingFuncCache = util.NewSyncLRUCache(100) + rm.allRoles = &sync.Map{} + return nil +} + +// AddLink adds the inheritance link between role: name1 and role: name2. +// aka role: name1 inherits role: name2. +func (rm *RoleManagerImpl) AddLink(name1 string, name2 string, domains ...string) error { + user, _ := rm.getRole(name1) + role, _ := rm.getRole(name2) + user.addRole(role) + return nil +} + +// DeleteLink deletes the inheritance link between role: name1 and role: name2. +// aka role: name1 does not inherit role: name2 any more. +func (rm *RoleManagerImpl) DeleteLink(name1 string, name2 string, domains ...string) error { + user, _ := rm.getRole(name1) + role, _ := rm.getRole(name2) + user.removeRole(role) + return nil +} + +// HasLink determines whether role: name1 inherits role: name2. 
+func (rm *RoleManagerImpl) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + if name1 == name2 || (rm.matchingFunc != nil && rm.Match(name1, name2)) { + return true, nil + } + + user, userCreated := rm.getRole(name1) + role, roleCreated := rm.getRole(name2) + + if userCreated { + defer rm.removeRole(user.name) + } + if roleCreated { + defer rm.removeRole(role.name) + } + + return rm.hasLinkHelper(role.name, map[string]*Role{user.name: user}, rm.maxHierarchyLevel), nil +} + +func (rm *RoleManagerImpl) hasLinkHelper(targetName string, roles map[string]*Role, level int) bool { + if level < 0 || len(roles) == 0 { + return false + } + + nextRoles := map[string]*Role{} + for _, role := range roles { + if targetName == role.name || (rm.matchingFunc != nil && rm.Match(role.name, targetName)) { + return true + } + role.rangeRoles(func(key, value interface{}) bool { + nextRoles[key.(string)] = value.(*Role) + return true + }) + } + + return rm.hasLinkHelper(targetName, nextRoles, level-1) +} + +// GetRoles gets the roles that a user inherits. +func (rm *RoleManagerImpl) GetRoles(name string, domains ...string) ([]string, error) { + user, created := rm.getRole(name) + if created { + defer rm.removeRole(user.name) + } + return user.getRoles(), nil +} + +// GetUsers gets the users of a role. +// domain is an unreferenced parameter here, may be used in other implementations. +func (rm *RoleManagerImpl) GetUsers(name string, domain ...string) ([]string, error) { + role, created := rm.getRole(name) + if created { + defer rm.removeRole(role.name) + } + return role.getUsers(), nil +} + +func (rm *RoleManagerImpl) toString() []string { + var roles []string + + rm.allRoles.Range(func(key, value interface{}) bool { + role := value.(*Role) + if text := role.toString(); text != "" { + roles = append(roles, text) + } + return true + }) + + return roles +} + +// PrintRoles prints all the roles to log. 
+func (rm *RoleManagerImpl) PrintRoles() error { + if !(rm.logger).IsEnabled() { + return nil + } + roles := rm.toString() + rm.logger.LogRole(roles) + return nil +} + +// GetDomains gets domains that a user has. +func (rm *RoleManagerImpl) GetDomains(name string) ([]string, error) { + domains := []string{defaultDomain} + return domains, nil +} + +// GetAllDomains gets all domains. +func (rm *RoleManagerImpl) GetAllDomains() ([]string, error) { + domains := []string{defaultDomain} + return domains, nil +} + +func (rm *RoleManagerImpl) copyFrom(other *RoleManagerImpl) { + other.Range(func(name1, name2 string, domain ...string) bool { + _ = rm.AddLink(name1, name2, domain...) + return true + }) +} + +func rangeLinks(users *sync.Map, fn func(name1, name2 string, domain ...string) bool) { + users.Range(func(_, value interface{}) bool { + user := value.(*Role) + user.roles.Range(func(key, _ interface{}) bool { + roleName := key.(string) + return fn(user.name, roleName, defaultDomain) + }) + return true + }) +} + +func (rm *RoleManagerImpl) Range(fn func(name1, name2 string, domain ...string) bool) { + rangeLinks(rm.allRoles, fn) +} + +// Deprecated: BuildRelationship is no longer required. +func (rm *RoleManagerImpl) BuildRelationship(name1 string, name2 string, domain ...string) error { + return nil +} + +type DomainManager struct { + rmMap *sync.Map + maxHierarchyLevel int + matchingFunc rbac.MatchingFunc + domainMatchingFunc rbac.MatchingFunc + logger log.Logger + matchingFuncCache *util.SyncLRUCache +} + +// NewDomainManager is the constructor for creating an instance of the +// default DomainManager implementation. +func NewDomainManager(maxHierarchyLevel int) *DomainManager { + dm := &DomainManager{} + _ = dm.Clear() // init rmMap and rmCache + dm.maxHierarchyLevel = maxHierarchyLevel + return dm +} + +// SetLogger sets role manager's logger. 
+func (dm *DomainManager) SetLogger(logger log.Logger) { + dm.logger = logger +} + +// AddMatchingFunc support use pattern in g. +func (dm *DomainManager) AddMatchingFunc(name string, fn rbac.MatchingFunc) { + dm.matchingFunc = fn + dm.rmMap.Range(func(key, value interface{}) bool { + value.(*RoleManagerImpl).AddMatchingFunc(name, fn) + return true + }) +} + +// AddDomainMatchingFunc support use domain pattern in g. +func (dm *DomainManager) AddDomainMatchingFunc(name string, fn rbac.MatchingFunc) { + dm.domainMatchingFunc = fn + dm.rmMap.Range(func(key, value interface{}) bool { + value.(*RoleManagerImpl).AddDomainMatchingFunc(name, fn) + return true + }) + dm.rebuild() +} + +// clears the map of RoleManagers. +func (dm *DomainManager) rebuild() { + rmMap := dm.rmMap + _ = dm.Clear() + rmMap.Range(func(key, value interface{}) bool { + domain := key.(string) + rm := value.(*RoleManagerImpl) + + rm.Range(func(name1, name2 string, _ ...string) bool { + _ = dm.AddLink(name1, name2, domain) + return true + }) + return true + }) +} + +// Clear clears all stored data and resets the role manager to the initial state. 
+func (dm *DomainManager) Clear() error { + dm.rmMap = &sync.Map{} + dm.matchingFuncCache = util.NewSyncLRUCache(100) + return nil +} + +func (dm *DomainManager) getDomain(domains ...string) (domain string, err error) { + switch len(domains) { + case 0: + return defaultDomain, nil + default: + return domains[0], nil + } +} + +func (dm *DomainManager) Match(str string, pattern string) bool { + if str == pattern { + return true + } + + if dm.domainMatchingFunc != nil { + return dm.domainMatchingFunc(str, pattern) + } else { + return false + } +} + +func (dm *DomainManager) rangeAffectedRoleManagers(domain string, fn func(rm *RoleManagerImpl)) { + if dm.domainMatchingFunc != nil { + dm.rmMap.Range(func(key, value interface{}) bool { + domain2 := key.(string) + if domain != domain2 && dm.Match(domain2, domain) { + fn(value.(*RoleManagerImpl)) + } + return true + }) + } +} + +func (dm *DomainManager) load(name interface{}) (value *RoleManagerImpl, ok bool) { + if r, ok := dm.rmMap.Load(name); ok { + return r.(*RoleManagerImpl), true + } + return nil, false +} + +// load or create a RoleManager instance of domain. +func (dm *DomainManager) getRoleManager(domain string, store bool) *RoleManagerImpl { + var rm *RoleManagerImpl + var ok bool + + if rm, ok = dm.load(domain); !ok { + rm = newRoleManagerWithMatchingFunc(dm.maxHierarchyLevel, dm.matchingFunc) + if store { + dm.rmMap.Store(domain, rm) + } + if dm.domainMatchingFunc != nil { + dm.rmMap.Range(func(key, value interface{}) bool { + domain2 := key.(string) + rm2 := value.(*RoleManagerImpl) + if domain != domain2 && dm.Match(domain, domain2) { + rm.copyFrom(rm2) + } + return true + }) + } + } + return rm +} + +// AddLink adds the inheritance link between role: name1 and role: name2. +// aka role: name1 inherits role: name2. +func (dm *DomainManager) AddLink(name1 string, name2 string, domains ...string) error { + domain, err := dm.getDomain(domains...) 
+ if err != nil { + return err + } + roleManager := dm.getRoleManager(domain, true) // create role manager if it does not exist + _ = roleManager.AddLink(name1, name2, domains...) + + dm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.AddLink(name1, name2, domains...) + }) + return nil +} + +// DeleteLink deletes the inheritance link between role: name1 and role: name2. +// aka role: name1 does not inherit role: name2 any more. +func (dm *DomainManager) DeleteLink(name1 string, name2 string, domains ...string) error { + domain, err := dm.getDomain(domains...) + if err != nil { + return err + } + roleManager := dm.getRoleManager(domain, true) // create role manager if it does not exist + _ = roleManager.DeleteLink(name1, name2, domains...) + + dm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.DeleteLink(name1, name2, domains...) + }) + return nil +} + +// HasLink determines whether role: name1 inherits role: name2. +func (dm *DomainManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + domain, err := dm.getDomain(domains...) + if err != nil { + return false, err + } + rm := dm.getRoleManager(domain, false) + return rm.HasLink(name1, name2, domains...) +} + +// GetRoles gets the roles that a subject inherits. +func (dm *DomainManager) GetRoles(name string, domains ...string) ([]string, error) { + domain, err := dm.getDomain(domains...) + if err != nil { + return nil, err + } + rm := dm.getRoleManager(domain, false) + return rm.GetRoles(name, domains...) +} + +// GetUsers gets the users of a role. +func (dm *DomainManager) GetUsers(name string, domains ...string) ([]string, error) { + domain, err := dm.getDomain(domains...) + if err != nil { + return nil, err + } + rm := dm.getRoleManager(domain, false) + return rm.GetUsers(name, domains...) 
+} + +func (dm *DomainManager) toString() []string { + var roles []string + + dm.rmMap.Range(func(key, value interface{}) bool { + domain := key.(string) + rm := value.(*RoleManagerImpl) + domainRoles := rm.toString() + roles = append(roles, fmt.Sprintf("%s: %s", domain, strings.Join(domainRoles, ", "))) + return true + }) + + return roles +} + +// PrintRoles prints all the roles to log. +func (dm *DomainManager) PrintRoles() error { + if !(dm.logger).IsEnabled() { + return nil + } + + roles := dm.toString() + dm.logger.LogRole(roles) + return nil +} + +// GetDomains gets domains that a user has. +func (dm *DomainManager) GetDomains(name string) ([]string, error) { + var domains []string + dm.rmMap.Range(func(key, value interface{}) bool { + domain := key.(string) + rm := value.(*RoleManagerImpl) + role, created := rm.getRole(name) + if created { + defer rm.removeRole(role.name) + } + if len(role.getUsers()) > 0 || len(role.getRoles()) > 0 { + domains = append(domains, domain) + } + return true + }) + return domains, nil +} + +// GetAllDomains gets all domains. +func (dm *DomainManager) GetAllDomains() ([]string, error) { + var domains []string + dm.rmMap.Range(func(key, value interface{}) bool { + domains = append(domains, key.(string)) + return true + }) + return domains, nil +} + +// Deprecated: BuildRelationship is no longer required. +func (dm *DomainManager) BuildRelationship(name1 string, name2 string, domain ...string) error { + return nil +} + +type RoleManager struct { + *DomainManager +} + +func NewRoleManager(maxHierarchyLevel int) *RoleManager { + rm := &RoleManager{} + rm.DomainManager = NewDomainManager(maxHierarchyLevel) + return rm +} + +type ConditionalRoleManager struct { + RoleManagerImpl +} + +func (crm *ConditionalRoleManager) copyFrom(other *ConditionalRoleManager) { + other.Range(func(name1, name2 string, domain ...string) bool { + _ = crm.AddLink(name1, name2, domain...) 
+ return true + }) +} + +// use this constructor to avoid rebuild of AddMatchingFunc. +func newConditionalRoleManagerWithMatchingFunc(maxHierarchyLevel int, fn rbac.MatchingFunc) *ConditionalRoleManager { + rm := NewConditionalRoleManager(maxHierarchyLevel) + rm.matchingFunc = fn + return rm +} + +// NewConditionalRoleManager is the constructor for creating an instance of the +// ConditionalRoleManager implementation. +func NewConditionalRoleManager(maxHierarchyLevel int) *ConditionalRoleManager { + rm := ConditionalRoleManager{} + _ = rm.Clear() // init allRoles and matchingFuncCache + rm.maxHierarchyLevel = maxHierarchyLevel + rm.SetLogger(&log.DefaultLogger{}) + return &rm +} + +// HasLink determines whether role: name1 inherits role: name2. +func (crm *ConditionalRoleManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + if name1 == name2 || (crm.matchingFunc != nil && crm.Match(name1, name2)) { + return true, nil + } + + user, userCreated := crm.getRole(name1) + role, roleCreated := crm.getRole(name2) + + if userCreated { + defer crm.removeRole(user.name) + } + if roleCreated { + defer crm.removeRole(role.name) + } + + return crm.hasLinkHelper(role.name, map[string]*Role{user.name: user}, crm.maxHierarchyLevel, domains...), nil +} + +// hasLinkHelper use the Breadth First Search algorithm to traverse the Role tree +// Judging whether the user has a role (has link) is to judge whether the role node can be reached from the user node. 
+func (crm *ConditionalRoleManager) hasLinkHelper(targetName string, roles map[string]*Role, level int, domains ...string) bool { + if level < 0 || len(roles) == 0 { + return false + } + nextRoles := map[string]*Role{} + for _, role := range roles { + if targetName == role.name || (crm.matchingFunc != nil && crm.Match(role.name, targetName)) { + return true + } + role.rangeRoles(func(key, value interface{}) bool { + nextRole := value.(*Role) + return crm.getNextRoles(role, nextRole, domains, nextRoles) + }) + } + + return crm.hasLinkHelper(targetName, nextRoles, level-1) +} + +func (crm *ConditionalRoleManager) getNextRoles(currentRole, nextRole *Role, domains []string, nextRoles map[string]*Role) bool { + passLinkConditionFunc := true + var err error + // If LinkConditionFunc exists, it needs to pass the verification to get nextRole + if len(domains) == 0 { + if linkConditionFunc, existLinkCondition := crm.GetLinkConditionFunc(currentRole.name, nextRole.name); existLinkCondition { + params, _ := crm.GetLinkConditionFuncParams(currentRole.name, nextRole.name) + passLinkConditionFunc, err = linkConditionFunc(params...) + } + } else { + if linkConditionFunc, existLinkCondition := crm.GetDomainLinkConditionFunc(currentRole.name, nextRole.name, domains[0]); existLinkCondition { + params, _ := crm.GetLinkConditionFuncParams(currentRole.name, nextRole.name, domains[0]) + passLinkConditionFunc, err = linkConditionFunc(params...) + } + } + + if err != nil { + crm.logger.LogError(err, "hasLinkHelper LinkCondition Error") + return false + } + + if passLinkConditionFunc { + nextRoles[nextRole.name] = nextRole + } + + return true +} + +// GetLinkConditionFunc get LinkConditionFunc based on userName, roleName. 
+func (crm *ConditionalRoleManager) GetLinkConditionFunc(userName, roleName string) (rbac.LinkConditionFunc, bool) { + return crm.GetDomainLinkConditionFunc(userName, roleName, defaultDomain) +} + +// GetDomainLinkConditionFunc get LinkConditionFunc based on userName, roleName, domain. +func (crm *ConditionalRoleManager) GetDomainLinkConditionFunc(userName, roleName, domain string) (rbac.LinkConditionFunc, bool) { + user, userCreated := crm.getRole(userName) + role, roleCreated := crm.getRole(roleName) + + if userCreated { + crm.removeRole(user.name) + return nil, false + } + + if roleCreated { + crm.removeRole(role.name) + return nil, false + } + + return user.getLinkConditionFunc(role, domain) +} + +// GetLinkConditionFuncParams gets parameters of LinkConditionFunc based on userName, roleName, domain. +func (crm *ConditionalRoleManager) GetLinkConditionFuncParams(userName, roleName string, domain ...string) ([]string, bool) { + user, userCreated := crm.getRole(userName) + role, roleCreated := crm.getRole(roleName) + + if userCreated { + crm.removeRole(user.name) + return nil, false + } + + if roleCreated { + crm.removeRole(role.name) + return nil, false + } + + domainName := defaultDomain + if len(domain) != 0 { + domainName = domain[0] + } + + if params, ok := user.getLinkConditionFuncParams(role, domainName); ok { + return params, true + } else { + return nil, false + } +} + +// AddLinkConditionFunc is based on userName, roleName, add LinkConditionFunc. +func (crm *ConditionalRoleManager) AddLinkConditionFunc(userName, roleName string, fn rbac.LinkConditionFunc) { + crm.AddDomainLinkConditionFunc(userName, roleName, defaultDomain, fn) +} + +// AddDomainLinkConditionFunc is based on userName, roleName, domain, add LinkConditionFunc. 
+func (crm *ConditionalRoleManager) AddDomainLinkConditionFunc(userName, roleName, domain string, fn rbac.LinkConditionFunc) { + user, _ := crm.getRole(userName) + role, _ := crm.getRole(roleName) + + user.addLinkConditionFunc(role, domain, fn) +} + +// SetLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. +func (crm *ConditionalRoleManager) SetLinkConditionFuncParams(userName, roleName string, params ...string) { + crm.SetDomainLinkConditionFuncParams(userName, roleName, defaultDomain, params...) +} + +// SetDomainLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. +func (crm *ConditionalRoleManager) SetDomainLinkConditionFuncParams(userName, roleName, domain string, params ...string) { + user, _ := crm.getRole(userName) + role, _ := crm.getRole(roleName) + + user.setLinkConditionFuncParams(role, domain, params...) +} + +type ConditionalDomainManager struct { + ConditionalRoleManager + DomainManager +} + +// NewConditionalDomainManager is the constructor for creating an instance of the +// ConditionalDomainManager implementation. +func NewConditionalDomainManager(maxHierarchyLevel int) *ConditionalDomainManager { + rm := ConditionalDomainManager{} + _ = rm.Clear() // init allRoles and matchingFuncCache + rm.maxHierarchyLevel = maxHierarchyLevel + rm.SetLogger(&log.DefaultLogger{}) + return &rm +} + +func (cdm *ConditionalDomainManager) load(name interface{}) (value *ConditionalRoleManager, ok bool) { + if r, ok := cdm.rmMap.Load(name); ok { + return r.(*ConditionalRoleManager), true + } + return nil, false +} + +// load or create a ConditionalRoleManager instance of domain. 
+func (cdm *ConditionalDomainManager) getConditionalRoleManager(domain string, store bool) *ConditionalRoleManager { + var rm *ConditionalRoleManager + var ok bool + + if rm, ok = cdm.load(domain); !ok { + rm = newConditionalRoleManagerWithMatchingFunc(cdm.maxHierarchyLevel, cdm.matchingFunc) + if store { + cdm.rmMap.Store(domain, rm) + } + if cdm.domainMatchingFunc != nil { + cdm.rmMap.Range(func(key, value interface{}) bool { + domain2 := key.(string) + rm2 := value.(*ConditionalRoleManager) + if domain != domain2 && cdm.Match(domain, domain2) { + rm.copyFrom(rm2) + } + return true + }) + } + } + return rm +} + +// HasLink determines whether role: name1 inherits role: name2. +func (cdm *ConditionalDomainManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + domain, err := cdm.getDomain(domains...) + if err != nil { + return false, err + } + rm := cdm.getConditionalRoleManager(domain, false) + return rm.HasLink(name1, name2, domains...) +} + +// AddLink adds the inheritance link between role: name1 and role: name2. +// aka role: name1 inherits role: name2. +func (cdm *ConditionalDomainManager) AddLink(name1 string, name2 string, domains ...string) error { + domain, err := cdm.getDomain(domains...) + if err != nil { + return err + } + conditionalRoleManager := cdm.getConditionalRoleManager(domain, true) // create role manager if it does not exist + _ = conditionalRoleManager.AddLink(name1, name2, domain) + + cdm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.AddLink(name1, name2, domain) + }) + return nil +} + +// DeleteLink deletes the inheritance link between role: name1 and role: name2. +// aka role: name1 does not inherit role: name2 any more. +func (cdm *ConditionalDomainManager) DeleteLink(name1 string, name2 string, domains ...string) error { + domain, err := cdm.getDomain(domains...) 
+ if err != nil { + return err + } + conditionalRoleManager := cdm.getConditionalRoleManager(domain, true) // create role manager if it does not exist + _ = conditionalRoleManager.DeleteLink(name1, name2, domain) + + cdm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.DeleteLink(name1, name2, domain) + }) + return nil +} + +// AddLinkConditionFunc is based on userName, roleName, add LinkConditionFunc. +func (cdm *ConditionalDomainManager) AddLinkConditionFunc(userName, roleName string, fn rbac.LinkConditionFunc) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).AddLinkConditionFunc(userName, roleName, fn) + return true + }) +} + +// AddDomainLinkConditionFunc is based on userName, roleName, domain, add LinkConditionFunc. +func (cdm *ConditionalDomainManager) AddDomainLinkConditionFunc(userName, roleName, domain string, fn rbac.LinkConditionFunc) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).AddDomainLinkConditionFunc(userName, roleName, domain, fn) + return true + }) +} + +// SetLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName. +func (cdm *ConditionalDomainManager) SetLinkConditionFuncParams(userName, roleName string, params ...string) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).SetLinkConditionFuncParams(userName, roleName, params...) + return true + }) +} + +// SetDomainLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain. +func (cdm *ConditionalDomainManager) SetDomainLinkConditionFuncParams(userName, roleName, domain string, params ...string) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).SetDomainLinkConditionFuncParams(userName, roleName, domain, params...) 
+ return true + }) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go new file mode 100644 index 000000000..28b40a352 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go @@ -0,0 +1,76 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rbac + +import "github.com/casbin/casbin/v2/log" + +type MatchingFunc func(arg1 string, arg2 string) bool + +type LinkConditionFunc = func(args ...string) (bool, error) + +// RoleManager provides interface to define the operations for managing roles. +type RoleManager interface { + // Clear clears all stored data and resets the role manager to the initial state. + Clear() error + // AddLink adds the inheritance link between two roles. role: name1 and role: name2. + // domain is a prefix to the roles (can be used for other purposes). + AddLink(name1 string, name2 string, domain ...string) error + // Deprecated: BuildRelationship is no longer required + BuildRelationship(name1 string, name2 string, domain ...string) error + // DeleteLink deletes the inheritance link between two roles. role: name1 and role: name2. + // domain is a prefix to the roles (can be used for other purposes). + DeleteLink(name1 string, name2 string, domain ...string) error + // HasLink determines whether a link exists between two roles. role: name1 inherits role: name2. 
+ // domain is a prefix to the roles (can be used for other purposes). + HasLink(name1 string, name2 string, domain ...string) (bool, error) + // GetRoles gets the roles that a user inherits. + // domain is a prefix to the roles (can be used for other purposes). + GetRoles(name string, domain ...string) ([]string, error) + // GetUsers gets the users that inherits a role. + // domain is a prefix to the users (can be used for other purposes). + GetUsers(name string, domain ...string) ([]string, error) + // GetDomains gets domains that a user has + GetDomains(name string) ([]string, error) + // GetAllDomains gets all domains + GetAllDomains() ([]string, error) + // PrintRoles prints all the roles to log. + PrintRoles() error + // SetLogger sets role manager's logger. + SetLogger(logger log.Logger) + // Match matches the domain with the pattern + Match(str string, pattern string) bool + // AddMatchingFunc adds the matching function + AddMatchingFunc(name string, fn MatchingFunc) + // AddDomainMatchingFunc adds the domain matching function + AddDomainMatchingFunc(name string, fn MatchingFunc) +} + +// ConditionalRoleManager provides interface to define the operations for managing roles. +// Link with conditions is supported. 
+type ConditionalRoleManager interface { + RoleManager + + // AddLinkConditionFunc Add condition function fn for Link userName->roleName, + // when fn returns true, Link is valid, otherwise invalid + AddLinkConditionFunc(userName, roleName string, fn LinkConditionFunc) + // SetLinkConditionFuncParams Sets the parameters of the condition function fn for Link userName->roleName + SetLinkConditionFuncParams(userName, roleName string, params ...string) + // AddDomainLinkConditionFunc Add condition function fn for Link userName-> {roleName, domain}, + // when fn returns true, Link is valid, otherwise invalid + AddDomainLinkConditionFunc(user string, role string, domain string, fn LinkConditionFunc) + // SetDomainLinkConditionFuncParams Sets the parameters of the condition function fn + // for Link userName->{roleName, domain} + SetDomainLinkConditionFuncParams(user string, role string, domain string, params ...string) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api.go b/vendor/github.com/casbin/casbin/v2/rbac_api.go new file mode 100644 index 000000000..fe2a6e7a0 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac_api.go @@ -0,0 +1,644 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package casbin + +import ( + "fmt" + "strings" + + "github.com/casbin/casbin/v2/constant" + "github.com/casbin/casbin/v2/errors" + "github.com/casbin/casbin/v2/util" +) + +// GetRolesForUser gets the roles that a user has. +func (e *Enforcer) GetRolesForUser(name string, domain ...string) ([]string, error) { + rm := e.GetRoleManager() + if rm == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + res, err := rm.GetRoles(name, domain...) + return res, err +} + +// GetUsersForRole gets the users that has a role. +func (e *Enforcer) GetUsersForRole(name string, domain ...string) ([]string, error) { + rm := e.GetRoleManager() + if rm == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + res, err := rm.GetUsers(name, domain...) + return res, err +} + +// HasRoleForUser determines whether a user has a role. +func (e *Enforcer) HasRoleForUser(name string, role string, domain ...string) (bool, error) { + roles, err := e.GetRolesForUser(name, domain...) + if err != nil { + return false, err + } + hasRole := false + for _, r := range roles { + if r == role { + hasRole = true + break + } + } + + return hasRole, nil +} + +// AddRoleForUser adds a role for a user. +// Returns false if the user already has the role (aka not affected). +func (e *Enforcer) AddRoleForUser(user string, role string, domain ...string) (bool, error) { + args := []string{user, role} + args = append(args, domain...) + return e.AddGroupingPolicy(args) +} + +// AddRolesForUser adds roles for a user. +// Returns false if the user already has the roles (aka not affected). +func (e *Enforcer) AddRolesForUser(user string, roles []string, domain ...string) (bool, error) { + var rules [][]string + for _, role := range roles { + rule := []string{user, role} + rule = append(rule, domain...) + rules = append(rules, rule) + } + return e.AddGroupingPolicies(rules) +} + +// DeleteRoleForUser deletes a role for a user. 
+// Returns false if the user does not have the role (aka not affected). +func (e *Enforcer) DeleteRoleForUser(user string, role string, domain ...string) (bool, error) { + args := []string{user, role} + args = append(args, domain...) + return e.RemoveGroupingPolicy(args) +} + +// DeleteRolesForUser deletes all roles for a user. +// Returns false if the user does not have any roles (aka not affected). +func (e *Enforcer) DeleteRolesForUser(user string, domain ...string) (bool, error) { + var args []string + if len(domain) == 0 { + args = []string{user} + } else if len(domain) > 1 { + return false, errors.ErrDomainParameter + } else { + args = []string{user, "", domain[0]} + } + return e.RemoveFilteredGroupingPolicy(0, args...) +} + +// DeleteUser deletes a user. +// Returns false if the user does not exist (aka not affected). +func (e *Enforcer) DeleteUser(user string) (bool, error) { + var err error + res1, err := e.RemoveFilteredGroupingPolicy(0, user) + if err != nil { + return res1, err + } + + subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) + if err != nil { + return false, err + } + res2, err := e.RemoveFilteredPolicy(subIndex, user) + return res1 || res2, err +} + +// DeleteRole deletes a role. +// Returns false if the role does not exist (aka not affected). +func (e *Enforcer) DeleteRole(role string) (bool, error) { + var err error + res1, err := e.RemoveFilteredGroupingPolicy(0, role) + if err != nil { + return res1, err + } + + res2, err := e.RemoveFilteredGroupingPolicy(1, role) + if err != nil { + return res1, err + } + + subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) + if err != nil { + return false, err + } + res3, err := e.RemoveFilteredPolicy(subIndex, role) + return res1 || res2 || res3, err +} + +// DeletePermission deletes a permission. +// Returns false if the permission does not exist (aka not affected). 
+func (e *Enforcer) DeletePermission(permission ...string) (bool, error) { + return e.RemoveFilteredPolicy(1, permission...) +} + +// AddPermissionForUser adds a permission for a user or role. +// Returns false if the user or role already has the permission (aka not affected). +func (e *Enforcer) AddPermissionForUser(user string, permission ...string) (bool, error) { + return e.AddPolicy(util.JoinSlice(user, permission...)) +} + +// AddPermissionsForUser adds multiple permissions for a user or role. +// Returns false if the user or role already has one of the permissions (aka not affected). +func (e *Enforcer) AddPermissionsForUser(user string, permissions ...[]string) (bool, error) { + var rules [][]string + for _, permission := range permissions { + rules = append(rules, util.JoinSlice(user, permission...)) + } + return e.AddPolicies(rules) +} + +// DeletePermissionForUser deletes a permission for a user or role. +// Returns false if the user or role does not have the permission (aka not affected). +func (e *Enforcer) DeletePermissionForUser(user string, permission ...string) (bool, error) { + return e.RemovePolicy(util.JoinSlice(user, permission...)) +} + +// DeletePermissionsForUser deletes permissions for a user or role. +// Returns false if the user or role does not have any permissions (aka not affected). +func (e *Enforcer) DeletePermissionsForUser(user string) (bool, error) { + subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) + if err != nil { + return false, err + } + return e.RemoveFilteredPolicy(subIndex, user) +} + +// GetPermissionsForUser gets permissions for a user or role. +func (e *Enforcer) GetPermissionsForUser(user string, domain ...string) ([][]string, error) { + return e.GetNamedPermissionsForUser("p", user, domain...) +} + +// GetNamedPermissionsForUser gets permissions for a user or role by named policy. 
+func (e *Enforcer) GetNamedPermissionsForUser(ptype string, user string, domain ...string) ([][]string, error) { + permission := make([][]string, 0) + for pType, assertion := range e.model["p"] { + if pType != ptype { + continue + } + args := make([]string, len(assertion.Tokens)) + subIndex, err := e.GetFieldIndex("p", constant.SubjectIndex) + if err != nil { + subIndex = 0 + } + args[subIndex] = user + + if len(domain) > 0 { + var index int + index, err = e.GetFieldIndex(ptype, constant.DomainIndex) + if err != nil { + return permission, err + } + args[index] = domain[0] + } + perm, err := e.GetFilteredNamedPolicy(ptype, 0, args...) + if err != nil { + return permission, err + } + permission = append(permission, perm...) + } + return permission, nil +} + +// HasPermissionForUser determines whether a user has a permission. +func (e *Enforcer) HasPermissionForUser(user string, permission ...string) (bool, error) { + return e.HasPolicy(util.JoinSlice(user, permission...)) +} + +// GetImplicitRolesForUser gets implicit roles that a user has. +// Compared to GetRolesForUser(), this function retrieves indirect roles besides direct roles. +// For example: +// g, alice, role:admin +// g, role:admin, role:user +// +// GetRolesForUser("alice") can only get: ["role:admin"]. +// But GetImplicitRolesForUser("alice") will get: ["role:admin", "role:user"]. +func (e *Enforcer) GetImplicitRolesForUser(name string, domain ...string) ([]string, error) { + var res []string + + for v := range e.rmMap { + roles, err := e.GetNamedImplicitRolesForUser(v, name, domain...) + if err != nil { + return nil, err + } + res = append(res, roles...) + } + + return res, nil +} + +// GetNamedImplicitRolesForUser gets implicit roles that a user has by named role definition. +// Compared to GetImplicitRolesForUser(), this function retrieves indirect roles besides direct roles. 
+// For example: +// g, alice, role:admin +// g, role:admin, role:user +// g2, alice, role:admin2 +// +// GetImplicitRolesForUser("alice") can only get: ["role:admin", "role:user"]. +// But GetNamedImplicitRolesForUser("g2", "alice") will get: ["role:admin2"]. +func (e *Enforcer) GetNamedImplicitRolesForUser(ptype string, name string, domain ...string) ([]string, error) { + var res []string + + rm := e.GetNamedRoleManager(ptype) + if rm == nil { + return nil, fmt.Errorf("role manager %s is not initialized", ptype) + } + roleSet := make(map[string]bool) + roleSet[name] = true + q := make([]string, 0) + q = append(q, name) + + for len(q) > 0 { + name := q[0] + q = q[1:] + + roles, err := rm.GetRoles(name, domain...) + if err != nil { + return nil, err + } + for _, r := range roles { + if _, ok := roleSet[r]; !ok { + res = append(res, r) + q = append(q, r) + roleSet[r] = true + } + } + } + + return res, nil +} + +// GetImplicitUsersForRole gets implicit users for a role. +func (e *Enforcer) GetImplicitUsersForRole(name string, domain ...string) ([]string, error) { + res := []string{} + + for _, rm := range e.rmMap { + roleSet := make(map[string]bool) + roleSet[name] = true + q := make([]string, 0) + q = append(q, name) + + for len(q) > 0 { + name := q[0] + q = q[1:] + + roles, err := rm.GetUsers(name, domain...) + if err != nil && err.Error() != "error: name does not exist" { + return nil, err + } + for _, r := range roles { + if _, ok := roleSet[r]; !ok { + res = append(res, r) + q = append(q, r) + roleSet[r] = true + } + } + } + } + + return res, nil +} + +// GetImplicitPermissionsForUser gets implicit permissions for a user or role. +// Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles. +// For example: +// p, admin, data1, read +// p, alice, data2, read +// g, alice, admin +// +// GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]]. 
+// But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]]. +func (e *Enforcer) GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) { + return e.GetNamedImplicitPermissionsForUser("p", "g", user, domain...) +} + +// GetNamedImplicitPermissionsForUser gets implicit permissions for a user or role by named policy. +// Compared to GetNamedPermissionsForUser(), this function retrieves permissions for inherited roles. +// For example: +// p, admin, data1, read +// p2, admin, create +// g, alice, admin +// +// GetImplicitPermissionsForUser("alice") can only get: [["admin", "data1", "read"]], whose policy is default policy "p" +// But you can specify the named policy "p2" to get: [["admin", "create"]] by GetNamedImplicitPermissionsForUser("p2","alice"). +func (e *Enforcer) GetNamedImplicitPermissionsForUser(ptype string, gtype string, user string, domain ...string) ([][]string, error) { + permission := make([][]string, 0) + rm := e.GetNamedRoleManager(gtype) + if rm == nil { + return nil, fmt.Errorf("role manager %s is not initialized", gtype) + } + + roles, err := e.GetNamedImplicitRolesForUser(gtype, user, domain...) 
+ if err != nil { + return nil, err + } + policyRoles := make(map[string]struct{}, len(roles)+1) + policyRoles[user] = struct{}{} + for _, r := range roles { + policyRoles[r] = struct{}{} + } + + domainIndex, err := e.GetFieldIndex(ptype, constant.DomainIndex) + for _, rule := range e.model["p"][ptype].Policy { + if len(domain) == 0 { + if _, ok := policyRoles[rule[0]]; ok { + permission = append(permission, deepCopyPolicy(rule)) + } + continue + } + if len(domain) > 1 { + return nil, errors.ErrDomainParameter + } + if err != nil { + return nil, err + } + d := domain[0] + matched := rm.Match(d, rule[domainIndex]) + if !matched { + continue + } + if _, ok := policyRoles[rule[0]]; ok { + newRule := deepCopyPolicy(rule) + newRule[domainIndex] = d + permission = append(permission, newRule) + } + } + return permission, nil +} + +// GetImplicitUsersForPermission gets implicit users for a permission. +// For example: +// p, admin, data1, read +// p, bob, data1, read +// g, alice, admin +// +// GetImplicitUsersForPermission("data1", "read") will get: ["alice", "bob"]. +// Note: only users will be returned, roles (2nd arg in "g") will be excluded. +func (e *Enforcer) GetImplicitUsersForPermission(permission ...string) ([]string, error) { + pSubjects, err := e.GetAllSubjects() + if err != nil { + return nil, err + } + gInherit, err := e.model.GetValuesForFieldInPolicyAllTypes("g", 1) + if err != nil { + return nil, err + } + gSubjects, err := e.model.GetValuesForFieldInPolicyAllTypes("g", 0) + if err != nil { + return nil, err + } + + subjects := append(pSubjects, gSubjects...) + util.ArrayRemoveDuplicates(&subjects) + + subjects = util.SetSubtract(subjects, gInherit) + + res := []string{} + for _, user := range subjects { + req := util.JoinSliceAny(user, permission...) + allowed, err := e.Enforce(req...) + if err != nil { + return nil, err + } + + if allowed { + res = append(res, user) + } + } + + return res, nil +} + +// GetDomainsForUser gets all domains. 
+func (e *Enforcer) GetDomainsForUser(user string) ([]string, error) { + var domains []string + for _, rm := range e.rmMap { + domain, err := rm.GetDomains(user) + if err != nil { + return nil, err + } + domains = append(domains, domain...) + } + return domains, nil +} + +// GetImplicitResourcesForUser returns all policies that user obtaining in domain. +func (e *Enforcer) GetImplicitResourcesForUser(user string, domain ...string) ([][]string, error) { + permissions, err := e.GetImplicitPermissionsForUser(user, domain...) + if err != nil { + return nil, err + } + res := make([][]string, 0) + for _, permission := range permissions { + if permission[0] == user { + res = append(res, permission) + continue + } + resLocal := [][]string{{user}} + tokensLength := len(permission) + t := make([][]string, 1, tokensLength) + for _, token := range permission[1:] { + tokens, err := e.GetImplicitUsersForRole(token, domain...) + if err != nil { + return nil, err + } + tokens = append(tokens, token) + t = append(t, tokens) + } + for i := 1; i < tokensLength; i++ { + n := make([][]string, 0) + for _, tokens := range t[i] { + for _, policy := range resLocal { + t := append([]string(nil), policy...) + t = append(t, tokens) + n = append(n, t) + } + } + resLocal = n + } + res = append(res, resLocal...) + } + return res, nil +} + +// deepCopyPolicy returns a deepcopy version of the policy to prevent changing policies through returned slice. +func deepCopyPolicy(src []string) []string { + newRule := make([]string, len(src)) + copy(newRule, src) + return newRule +} + +// GetAllowedObjectConditions returns a string array of object conditions that the user can access. +// For example: conditions, err := e.GetAllowedObjectConditions("alice", "read", "r.obj.") +// Note: +// +// 0. prefix: You can customize the prefix of the object conditions, and "r.obj." is commonly used as a prefix. +// After removing the prefix, the remaining part is the condition of the object. 
+// If there is an obj policy that does not meet the prefix requirement, an errors.ERR_OBJ_CONDITION will be returned. +// +// 1. If the 'objectConditions' array is empty, return errors.ERR_EMPTY_CONDITION +// This error is returned because some data adapters' ORM return full table data by default +// when they receive an empty condition, which tends to behave contrary to expectations.(e.g. GORM) +// If you are using an adapter that does not behave like this, you can choose to ignore this error. +func (e *Enforcer) GetAllowedObjectConditions(user string, action string, prefix string) ([]string, error) { + permissions, err := e.GetImplicitPermissionsForUser(user) + if err != nil { + return nil, err + } + + var objectConditions []string + for _, policy := range permissions { + // policy {sub, obj, act} + if policy[2] == action { + if !strings.HasPrefix(policy[1], prefix) { + return nil, errors.ErrObjCondition + } + objectConditions = append(objectConditions, strings.TrimPrefix(policy[1], prefix)) + } + } + + if len(objectConditions) == 0 { + return nil, errors.ErrEmptyCondition + } + + return objectConditions, nil +} + +// removeDuplicatePermissions Convert permissions to string as a hash to deduplicate. +func removeDuplicatePermissions(permissions [][]string) [][]string { + permissionsSet := make(map[string]bool) + res := make([][]string, 0) + for _, permission := range permissions { + permissionStr := util.ArrayToString(permission) + if permissionsSet[permissionStr] { + continue + } + permissionsSet[permissionStr] = true + res = append(res, permission) + } + return res +} + +// GetImplicitUsersForResource return implicit user based on resource. 
+// for example: +// p, alice, data1, read +// p, bob, data2, write +// p, data2_admin, data2, read +// p, data2_admin, data2, write +// g, alice, data2_admin +// GetImplicitUsersForResource("data2") will return [[bob data2 write] [alice data2 read] [alice data2 write]] +// GetImplicitUsersForResource("data1") will return [[alice data1 read]] +// Note: only users will be returned, roles (2nd arg in "g") will be excluded. +func (e *Enforcer) GetImplicitUsersForResource(resource string) ([][]string, error) { + permissions := make([][]string, 0) + subjectIndex, _ := e.GetFieldIndex("p", "sub") + objectIndex, _ := e.GetFieldIndex("p", "obj") + rm := e.GetRoleManager() + if rm == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + + isRole := make(map[string]bool) + roles, err := e.GetAllRoles() + if err != nil { + return nil, err + } + for _, role := range roles { + isRole[role] = true + } + + for _, rule := range e.model["p"]["p"].Policy { + obj := rule[objectIndex] + if obj != resource { + continue + } + + sub := rule[subjectIndex] + + if !isRole[sub] { + permissions = append(permissions, rule) + } else { + users, err := rm.GetUsers(sub) + if err != nil { + return nil, err + } + + for _, user := range users { + implicitUserRule := deepCopyPolicy(rule) + implicitUserRule[subjectIndex] = user + permissions = append(permissions, implicitUserRule) + } + } + } + + res := removeDuplicatePermissions(permissions) + return res, nil +} + +// GetImplicitUsersForResourceByDomain return implicit user based on resource and domain. +// Compared to GetImplicitUsersForResource, domain is supported. 
+func (e *Enforcer) GetImplicitUsersForResourceByDomain(resource string, domain string) ([][]string, error) { + permissions := make([][]string, 0) + subjectIndex, _ := e.GetFieldIndex("p", "sub") + objectIndex, _ := e.GetFieldIndex("p", "obj") + domIndex, _ := e.GetFieldIndex("p", "dom") + rm := e.GetRoleManager() + if rm == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + + isRole := make(map[string]bool) + + if roles, err := e.GetAllRolesByDomain(domain); err != nil { + return nil, err + } else { + for _, role := range roles { + isRole[role] = true + } + } + + for _, rule := range e.model["p"]["p"].Policy { + obj := rule[objectIndex] + if obj != resource { + continue + } + + sub := rule[subjectIndex] + + if !isRole[sub] { + permissions = append(permissions, rule) + } else { + if domain != rule[domIndex] { + continue + } + users, err := rm.GetUsers(sub, domain) + if err != nil { + return nil, err + } + + for _, user := range users { + implicitUserRule := deepCopyPolicy(rule) + implicitUserRule[subjectIndex] = user + permissions = append(permissions, implicitUserRule) + } + } + } + + res := removeDuplicatePermissions(permissions) + return res, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go b/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go new file mode 100644 index 000000000..7b10e565d --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac_api_synced.go @@ -0,0 +1,203 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +// GetRolesForUser gets the roles that a user has. +func (e *SyncedEnforcer) GetRolesForUser(name string, domain ...string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetRolesForUser(name, domain...) +} + +// GetUsersForRole gets the users that has a role. +func (e *SyncedEnforcer) GetUsersForRole(name string, domain ...string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetUsersForRole(name, domain...) +} + +// HasRoleForUser determines whether a user has a role. +func (e *SyncedEnforcer) HasRoleForUser(name string, role string, domain ...string) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasRoleForUser(name, role, domain...) +} + +// AddRoleForUser adds a role for a user. +// Returns false if the user already has the role (aka not affected). +func (e *SyncedEnforcer) AddRoleForUser(user string, role string, domain ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddRoleForUser(user, role, domain...) +} + +// AddRolesForUser adds roles for a user. +// Returns false if the user already has the roles (aka not affected). +func (e *SyncedEnforcer) AddRolesForUser(user string, roles []string, domain ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddRolesForUser(user, roles, domain...) +} + +// DeleteRoleForUser deletes a role for a user. +// Returns false if the user does not have the role (aka not affected). +func (e *SyncedEnforcer) DeleteRoleForUser(user string, role string, domain ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRoleForUser(user, role, domain...) +} + +// DeleteRolesForUser deletes all roles for a user. +// Returns false if the user does not have any roles (aka not affected). 
+func (e *SyncedEnforcer) DeleteRolesForUser(user string, domain ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRolesForUser(user, domain...) +} + +// DeleteUser deletes a user. +// Returns false if the user does not exist (aka not affected). +func (e *SyncedEnforcer) DeleteUser(user string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteUser(user) +} + +// DeleteRole deletes a role. +// Returns false if the role does not exist (aka not affected). +func (e *SyncedEnforcer) DeleteRole(role string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRole(role) +} + +// DeletePermission deletes a permission. +// Returns false if the permission does not exist (aka not affected). +func (e *SyncedEnforcer) DeletePermission(permission ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeletePermission(permission...) +} + +// AddPermissionForUser adds a permission for a user or role. +// Returns false if the user or role already has the permission (aka not affected). +func (e *SyncedEnforcer) AddPermissionForUser(user string, permission ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPermissionForUser(user, permission...) +} + +// AddPermissionsForUser adds permissions for a user or role. +// Returns false if the user or role already has the permissions (aka not affected). +func (e *SyncedEnforcer) AddPermissionsForUser(user string, permissions ...[]string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddPermissionsForUser(user, permissions...) +} + +// DeletePermissionForUser deletes a permission for a user or role. +// Returns false if the user or role does not have the permission (aka not affected). 
+func (e *SyncedEnforcer) DeletePermissionForUser(user string, permission ...string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeletePermissionForUser(user, permission...) +} + +// DeletePermissionsForUser deletes permissions for a user or role. +// Returns false if the user or role does not have any permissions (aka not affected). +func (e *SyncedEnforcer) DeletePermissionsForUser(user string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeletePermissionsForUser(user) +} + +// GetPermissionsForUser gets permissions for a user or role. +func (e *SyncedEnforcer) GetPermissionsForUser(user string, domain ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetPermissionsForUser(user, domain...) +} + +// GetNamedPermissionsForUser gets permissions for a user or role by named policy. +func (e *SyncedEnforcer) GetNamedPermissionsForUser(ptype string, user string, domain ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetNamedPermissionsForUser(ptype, user, domain...) +} + +// HasPermissionForUser determines whether a user has a permission. +func (e *SyncedEnforcer) HasPermissionForUser(user string, permission ...string) (bool, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.HasPermissionForUser(user, permission...) +} + +// GetImplicitRolesForUser gets implicit roles that a user has. +// Compared to GetRolesForUser(), this function retrieves indirect roles besides direct roles. +// For example: +// g, alice, role:admin +// g, role:admin, role:user +// +// GetRolesForUser("alice") can only get: ["role:admin"]. +// But GetImplicitRolesForUser("alice") will get: ["role:admin", "role:user"]. +func (e *SyncedEnforcer) GetImplicitRolesForUser(name string, domain ...string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetImplicitRolesForUser(name, domain...) 
+} + +// GetImplicitPermissionsForUser gets implicit permissions for a user or role. +// Compared to GetPermissionsForUser(), this function retrieves permissions for inherited roles. +// For example: +// p, admin, data1, read +// p, alice, data2, read +// g, alice, admin +// +// GetPermissionsForUser("alice") can only get: [["alice", "data2", "read"]]. +// But GetImplicitPermissionsForUser("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]]. +func (e *SyncedEnforcer) GetImplicitPermissionsForUser(user string, domain ...string) ([][]string, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.GetImplicitPermissionsForUser(user, domain...) +} + +// GetNamedImplicitPermissionsForUser gets implicit permissions for a user or role by named policy. +// Compared to GetNamedPermissionsForUser(), this function retrieves permissions for inherited roles. +// For example: +// p, admin, data1, read +// p2, admin, create +// g, alice, admin +// +// GetImplicitPermissionsForUser("alice") can only get: [["admin", "data1", "read"]], whose policy is default policy "p" +// But you can specify the named policy "p2" to get: [["admin", "create"]] by GetNamedImplicitPermissionsForUser("p2","alice"). +func (e *SyncedEnforcer) GetNamedImplicitPermissionsForUser(ptype string, gtype string, user string, domain ...string) ([][]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetNamedImplicitPermissionsForUser(ptype, gtype, user, domain...) +} + +// GetImplicitUsersForPermission gets implicit users for a permission. +// For example: +// p, admin, data1, read +// p, bob, data1, read +// g, alice, admin +// +// GetImplicitUsersForPermission("data1", "read") will get: ["alice", "bob"]. +// Note: only users will be returned, roles (2nd arg in "g") will be excluded. 
+func (e *SyncedEnforcer) GetImplicitUsersForPermission(permission ...string) ([]string, error) { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetImplicitUsersForPermission(permission...) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go new file mode 100644 index 000000000..f6fc4a24e --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains.go @@ -0,0 +1,192 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +import ( + "fmt" + + "github.com/casbin/casbin/v2/constant" +) + +// GetUsersForRoleInDomain gets the users that has a role inside a domain. Add by Gordon. +func (e *Enforcer) GetUsersForRoleInDomain(name string, domain string) []string { + if e.GetRoleManager() == nil { + return nil + } + res, _ := e.GetRoleManager().GetUsers(name, domain) + return res +} + +// GetRolesForUserInDomain gets the roles that a user has inside a domain. +func (e *Enforcer) GetRolesForUserInDomain(name string, domain string) []string { + if e.GetRoleManager() == nil { + return nil + } + res, _ := e.GetRoleManager().GetRoles(name, domain) + return res +} + +// GetPermissionsForUserInDomain gets permissions for a user or role inside a domain. 
+func (e *Enforcer) GetPermissionsForUserInDomain(user string, domain string) [][]string { + res, _ := e.GetImplicitPermissionsForUser(user, domain) + return res +} + +// AddRoleForUserInDomain adds a role for a user inside a domain. +// Returns false if the user already has the role (aka not affected). +func (e *Enforcer) AddRoleForUserInDomain(user string, role string, domain string) (bool, error) { + return e.AddGroupingPolicy(user, role, domain) +} + +// DeleteRoleForUserInDomain deletes a role for a user inside a domain. +// Returns false if the user does not have the role (aka not affected). +func (e *Enforcer) DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) { + return e.RemoveGroupingPolicy(user, role, domain) +} + +// DeleteRolesForUserInDomain deletes all roles for a user inside a domain. +// Returns false if the user does not have any roles (aka not affected). +func (e *Enforcer) DeleteRolesForUserInDomain(user string, domain string) (bool, error) { + if e.GetRoleManager() == nil { + return false, fmt.Errorf("role manager is not initialized") + } + roles, err := e.GetRoleManager().GetRoles(user, domain) + if err != nil { + return false, err + } + + var rules [][]string + for _, role := range roles { + rules = append(rules, []string{user, role, domain}) + } + + return e.RemoveGroupingPolicies(rules) +} + +// GetAllUsersByDomain would get all users associated with the domain. 
+func (e *Enforcer) GetAllUsersByDomain(domain string) ([]string, error) { + m := make(map[string]struct{}) + g, err := e.model.GetAssertion("g", "g") + if err != nil { + return []string{}, err + } + p := e.model["p"]["p"] + users := make([]string, 0) + index, err := e.GetFieldIndex("p", constant.DomainIndex) + if err != nil { + return []string{}, err + } + + getUser := func(index int, policies [][]string, domain string, m map[string]struct{}) []string { + if len(policies) == 0 || len(policies[0]) <= index { + return []string{} + } + res := make([]string, 0) + for _, policy := range policies { + if _, ok := m[policy[0]]; policy[index] == domain && !ok { + res = append(res, policy[0]) + m[policy[0]] = struct{}{} + } + } + return res + } + + users = append(users, getUser(2, g.Policy, domain, m)...) + users = append(users, getUser(index, p.Policy, domain, m)...) + return users, nil +} + +// DeleteAllUsersByDomain would delete all users associated with the domain. +func (e *Enforcer) DeleteAllUsersByDomain(domain string) (bool, error) { + g, err := e.model.GetAssertion("g", "g") + if err != nil { + return false, err + } + p := e.model["p"]["p"] + index, err := e.GetFieldIndex("p", constant.DomainIndex) + if err != nil { + return false, err + } + + getUser := func(index int, policies [][]string, domain string) [][]string { + if len(policies) == 0 || len(policies[0]) <= index { + return [][]string{} + } + res := make([][]string, 0) + for _, policy := range policies { + if policy[index] == domain { + res = append(res, policy) + } + } + return res + } + + users := getUser(2, g.Policy, domain) + if _, err = e.RemoveGroupingPolicies(users); err != nil { + return false, err + } + users = getUser(index, p.Policy, domain) + if _, err = e.RemovePolicies(users); err != nil { + return false, err + } + return true, nil +} + +// DeleteDomains would delete all associated users and roles. +// It would delete all domains if parameter is not provided. 
+func (e *Enforcer) DeleteDomains(domains ...string) (bool, error) { + if len(domains) == 0 { + e.ClearPolicy() + return true, nil + } + for _, domain := range domains { + if _, err := e.DeleteAllUsersByDomain(domain); err != nil { + return false, err + } + } + return true, nil +} + +// GetAllDomains would get all domains. +func (e *Enforcer) GetAllDomains() ([]string, error) { + if e.GetRoleManager() == nil { + return nil, fmt.Errorf("role manager is not initialized") + } + return e.GetRoleManager().GetAllDomains() +} + +// GetAllRolesByDomain would get all roles associated with the domain. +// note: Not applicable to Domains with inheritance relationship (implicit roles) +func (e *Enforcer) GetAllRolesByDomain(domain string) ([]string, error) { + g, err := e.model.GetAssertion("g", "g") + if err != nil { + return []string{}, err + } + policies := g.Policy + roles := make([]string, 0) + existMap := make(map[string]bool) // remove duplicates + + for _, policy := range policies { + if policy[len(policy)-1] == domain { + role := policy[len(policy)-2] + if _, ok := existMap[role]; !ok { + roles = append(roles, role) + existMap[role] = true + } + } + } + + return roles, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go new file mode 100644 index 000000000..26f6ce4ba --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/rbac_api_with_domains_synced.go @@ -0,0 +1,60 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package casbin + +// GetUsersForRoleInDomain gets the users that has a role inside a domain. Add by Gordon. +func (e *SyncedEnforcer) GetUsersForRoleInDomain(name string, domain string) []string { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetUsersForRoleInDomain(name, domain) +} + +// GetRolesForUserInDomain gets the roles that a user has inside a domain. +func (e *SyncedEnforcer) GetRolesForUserInDomain(name string, domain string) []string { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetRolesForUserInDomain(name, domain) +} + +// GetPermissionsForUserInDomain gets permissions for a user or role inside a domain. +func (e *SyncedEnforcer) GetPermissionsForUserInDomain(user string, domain string) [][]string { + e.m.RLock() + defer e.m.RUnlock() + return e.Enforcer.GetPermissionsForUserInDomain(user, domain) +} + +// AddRoleForUserInDomain adds a role for a user inside a domain. +// Returns false if the user already has the role (aka not affected). +func (e *SyncedEnforcer) AddRoleForUserInDomain(user string, role string, domain string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.AddRoleForUserInDomain(user, role, domain) +} + +// DeleteRoleForUserInDomain deletes a role for a user inside a domain. +// Returns false if the user does not have the role (aka not affected). 
+func (e *SyncedEnforcer) DeleteRoleForUserInDomain(user string, role string, domain string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRoleForUserInDomain(user, role, domain) +} + +// DeleteRolesForUserInDomain deletes all roles for a user inside a domain. +// Returns false if the user does not have any roles (aka not affected). +func (e *SyncedEnforcer) DeleteRolesForUserInDomain(user string, domain string) (bool, error) { + e.m.Lock() + defer e.m.Unlock() + return e.Enforcer.DeleteRolesForUserInDomain(user, domain) +} diff --git a/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go b/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go new file mode 100644 index 000000000..ea1b4801d --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go @@ -0,0 +1,498 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "errors" + "fmt" + "net" + "regexp" + "strings" + "sync" + "time" + + "github.com/bmatcuk/doublestar/v4" + + "github.com/casbin/casbin/v2/rbac" + + "github.com/casbin/govaluate" +) + +var ( + keyMatch2Re = regexp.MustCompile(`:[^/]+`) + keyMatch3Re = regexp.MustCompile(`\{[^/]+\}`) + keyMatch4Re = regexp.MustCompile(`{([^/]+)}`) + keyMatch5Re = regexp.MustCompile(`\{[^/]+\}`) + keyGet2Re1 = regexp.MustCompile(`:[^/]+`) + keyGet3Re1 = regexp.MustCompile(`\{[^/]+?\}`) // non-greedy match of `{...}` to support multiple {} in `/.../` + reCache = map[string]*regexp.Regexp{} + reCacheMu = sync.RWMutex{} +) + +func mustCompileOrGet(key string) *regexp.Regexp { + reCacheMu.RLock() + re, ok := reCache[key] + reCacheMu.RUnlock() + + if !ok { + re = regexp.MustCompile(key) + reCacheMu.Lock() + reCache[key] = re + reCacheMu.Unlock() + } + + return re +} + +// validate the variadic parameter size and type as string. +func validateVariadicArgs(expectedLen int, args ...interface{}) error { + if len(args) != expectedLen { + return fmt.Errorf("expected %d arguments, but got %d", expectedLen, len(args)) + } + + for _, p := range args { + _, ok := p.(string) + if !ok { + return errors.New("argument must be a string") + } + } + + return nil +} + +// validate the variadic string parameter size. +func validateVariadicStringArgs(expectedLen int, args ...string) error { + if len(args) != expectedLen { + return fmt.Errorf("expected %d arguments, but got %d", expectedLen, len(args)) + } + return nil +} + +// KeyMatch determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. +// For example, "/foo/bar" matches "/foo/*". +func KeyMatch(key1 string, key2 string) bool { + i := strings.Index(key2, "*") + if i == -1 { + return key1 == key2 + } + + if len(key1) > i { + return key1[:i] == key2[:i] + } + return key1 == key2[:i] +} + +// KeyMatchFunc is the wrapper for KeyMatch. 
+func KeyMatchFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch(name1, name2), nil +} + +// KeyGet returns the matched part +// For example, "/foo/bar/foo" matches "/foo/*" +// "bar/foo" will been returned. +func KeyGet(key1, key2 string) string { + i := strings.Index(key2, "*") + if i == -1 { + return "" + } + if len(key1) > i { + if key1[:i] == key2[:i] { + return key1[i:] + } + } + return "" +} + +// KeyGetFunc is the wrapper for KeyGet. +func KeyGetFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyGet", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyGet(name1, name2), nil +} + +// KeyMatch2 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. +// For example, "/foo/bar" matches "/foo/*", "/resource1" matches "/:resource". +func KeyMatch2(key1 string, key2 string) bool { + key2 = strings.Replace(key2, "/*", "/.*", -1) + + key2 = keyMatch2Re.ReplaceAllString(key2, "$1[^/]+$2") + + return RegexMatch(key1, "^"+key2+"$") +} + +// KeyMatch2Func is the wrapper for KeyMatch2. +func KeyMatch2Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch2", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch2(name1, name2), nil +} + +// KeyGet2 returns value matched pattern +// For example, "/resource1" matches "/:resource" +// if the pathVar == "resource", then "resource1" will be returned. 
+func KeyGet2(key1, key2 string, pathVar string) string { + key2 = strings.Replace(key2, "/*", "/.*", -1) + keys := keyGet2Re1.FindAllString(key2, -1) + key2 = keyGet2Re1.ReplaceAllString(key2, "$1([^/]+)$2") + key2 = "^" + key2 + "$" + + re := mustCompileOrGet(key2) + values := re.FindAllStringSubmatch(key1, -1) + if len(values) == 0 { + return "" + } + for i, key := range keys { + if pathVar == key[1:] { + return values[0][i+1] + } + } + return "" +} + +// KeyGet2Func is the wrapper for KeyGet2. +func KeyGet2Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(3, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyGet2", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + key := args[2].(string) + + return KeyGet2(name1, name2, key), nil +} + +// KeyMatch3 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. +// For example, "/foo/bar" matches "/foo/*", "/resource1" matches "/{resource}". +func KeyMatch3(key1 string, key2 string) bool { + key2 = strings.Replace(key2, "/*", "/.*", -1) + key2 = keyMatch3Re.ReplaceAllString(key2, "$1[^/]+$2") + + return RegexMatch(key1, "^"+key2+"$") +} + +// KeyMatch3Func is the wrapper for KeyMatch3. +func KeyMatch3Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch3", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch3(name1, name2), nil +} + +// KeyGet3 returns value matched pattern +// For example, "project/proj_project1_admin/" matches "project/proj_{project}_admin/" +// if the pathVar == "project", then "project1" will be returned. 
+func KeyGet3(key1, key2 string, pathVar string) string { + key2 = strings.Replace(key2, "/*", "/.*", -1) + + keys := keyGet3Re1.FindAllString(key2, -1) + key2 = keyGet3Re1.ReplaceAllString(key2, "$1([^/]+?)$2") + key2 = "^" + key2 + "$" + re := mustCompileOrGet(key2) + values := re.FindAllStringSubmatch(key1, -1) + if len(values) == 0 { + return "" + } + for i, key := range keys { + if pathVar == key[1:len(key)-1] { + return values[0][i+1] + } + } + return "" +} + +// KeyGet3Func is the wrapper for KeyGet3. +func KeyGet3Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(3, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyGet3", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + key := args[2].(string) + + return KeyGet3(name1, name2, key), nil +} + +// KeyMatch4 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. +// Besides what KeyMatch3 does, KeyMatch4 can also match repeated patterns: +// "/parent/123/child/123" matches "/parent/{id}/child/{id}" +// "/parent/123/child/456" does not match "/parent/{id}/child/{id}" +// But KeyMatch3 will match both. +func KeyMatch4(key1 string, key2 string) bool { + key2 = strings.Replace(key2, "/*", "/.*", -1) + + tokens := []string{} + + re := keyMatch4Re + key2 = re.ReplaceAllStringFunc(key2, func(s string) string { + tokens = append(tokens, s[1:len(s)-1]) + return "([^/]+)" + }) + + re = mustCompileOrGet("^" + key2 + "$") + matches := re.FindStringSubmatch(key1) + if matches == nil { + return false + } + matches = matches[1:] + + if len(tokens) != len(matches) { + panic(errors.New("KeyMatch4: number of tokens is not equal to number of values")) + } + + values := map[string]string{} + + for key, token := range tokens { + if _, ok := values[token]; !ok { + values[token] = matches[key] + } + if values[token] != matches[key] { + return false + } + } + + return true +} + +// KeyMatch4Func is the wrapper for KeyMatch4. 
+func KeyMatch4Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch4", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch4(name1, name2), nil +} + +// KeyMatch5 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a * +// For example, +// - "/foo/bar?status=1&type=2" matches "/foo/bar" +// - "/parent/child1" and "/parent/child1" matches "/parent/*" +// - "/parent/child1?status=1" matches "/parent/*". +func KeyMatch5(key1 string, key2 string) bool { + i := strings.Index(key1, "?") + + if i != -1 { + key1 = key1[:i] + } + + key2 = strings.Replace(key2, "/*", "/.*", -1) + key2 = keyMatch5Re.ReplaceAllString(key2, "$1[^/]+$2") + + return RegexMatch(key1, "^"+key2+"$") +} + +// KeyMatch5Func is the wrapper for KeyMatch5. +func KeyMatch5Func(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "keyMatch5", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return KeyMatch5(name1, name2), nil +} + +// RegexMatch determines whether key1 matches the pattern of key2 in regular expression. +func RegexMatch(key1 string, key2 string) bool { + res, err := regexp.MatchString(key2, key1) + if err != nil { + panic(err) + } + return res +} + +// RegexMatchFunc is the wrapper for RegexMatch. +func RegexMatchFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "regexMatch", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return RegexMatch(name1, name2), nil +} + +// IPMatch determines whether IP address ip1 matches the pattern of IP address ip2, ip2 can be an IP address or a CIDR pattern. +// For example, "192.168.2.123" matches "192.168.2.0/24". 
+func IPMatch(ip1 string, ip2 string) bool { + objIP1 := net.ParseIP(ip1) + if objIP1 == nil { + panic("invalid argument: ip1 in IPMatch() function is not an IP address.") + } + + _, cidr, err := net.ParseCIDR(ip2) + if err != nil { + objIP2 := net.ParseIP(ip2) + if objIP2 == nil { + panic("invalid argument: ip2 in IPMatch() function is neither an IP address nor a CIDR.") + } + + return objIP1.Equal(objIP2) + } + + return cidr.Contains(objIP1) +} + +// IPMatchFunc is the wrapper for IPMatch. +func IPMatchFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "ipMatch", err) + } + + ip1 := args[0].(string) + ip2 := args[1].(string) + + return IPMatch(ip1, ip2), nil +} + +// GlobMatch determines whether key1 matches the pattern of key2 using glob pattern. +func GlobMatch(key1 string, key2 string) (bool, error) { + return doublestar.Match(key2, key1) +} + +// GlobMatchFunc is the wrapper for GlobMatch. +func GlobMatchFunc(args ...interface{}) (interface{}, error) { + if err := validateVariadicArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "globMatch", err) + } + + name1 := args[0].(string) + name2 := args[1].(string) + + return GlobMatch(name1, name2) +} + +// GenerateGFunction is the factory method of the g(_, _[, _]) function. +func GenerateGFunction(rm rbac.RoleManager) govaluate.ExpressionFunction { + memorized := sync.Map{} + return func(args ...interface{}) (interface{}, error) { + // Like all our other govaluate functions, all args are strings. + + // Allocate and generate a cache key from the arguments... + total := len(args) + for _, a := range args { + aStr := a.(string) + total += len(aStr) + } + builder := strings.Builder{} + builder.Grow(total) + for _, arg := range args { + builder.WriteByte(0) + builder.WriteString(arg.(string)) + } + key := builder.String() + + // ...and see if we've already calculated this. 
+ v, found := memorized.Load(key) + if found { + return v, nil + } + + // If not, do the calculation. + // There are guaranteed to be exactly 2 or 3 arguments. + name1, name2 := args[0].(string), args[1].(string) + if rm == nil { + v = name1 == name2 + } else if len(args) == 2 { + v, _ = rm.HasLink(name1, name2) + } else { + domain := args[2].(string) + v, _ = rm.HasLink(name1, name2, domain) + } + + memorized.Store(key, v) + return v, nil + } +} + +// GenerateConditionalGFunction is the factory method of the g(_, _[, _]) function with conditions. +func GenerateConditionalGFunction(crm rbac.ConditionalRoleManager) govaluate.ExpressionFunction { + return func(args ...interface{}) (interface{}, error) { + // Like all our other govaluate functions, all args are strings. + var hasLink bool + + name1, name2 := args[0].(string), args[1].(string) + if crm == nil { + hasLink = name1 == name2 + } else if len(args) == 2 { + hasLink, _ = crm.HasLink(name1, name2) + } else { + domain := args[2].(string) + hasLink, _ = crm.HasLink(name1, name2, domain) + } + + return hasLink, nil + } +} + +// builtin LinkConditionFunc + +// TimeMatchFunc is the wrapper for TimeMatch. +func TimeMatchFunc(args ...string) (bool, error) { + if err := validateVariadicStringArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %w", "TimeMatch", err) + } + return TimeMatch(args[0], args[1]) +} + +// TimeMatch determines whether the current time is between startTime and endTime. +// You can use "_" to indicate that the parameter is ignored. 
+func TimeMatch(startTime, endTime string) (bool, error) { + now := time.Now() + if startTime != "_" { + if start, err := time.Parse("2006-01-02 15:04:05", startTime); err != nil { + return false, err + } else if !now.After(start) { + return false, nil + } + } + + if endTime != "_" { + if end, err := time.Parse("2006-01-02 15:04:05", endTime); err != nil { + return false, err + } else if !now.Before(end) { + return false, nil + } + } + + return true, nil +} diff --git a/vendor/github.com/casbin/casbin/v2/util/util.go b/vendor/github.com/casbin/casbin/v2/util/util.go new file mode 100644 index 000000000..f247b27b4 --- /dev/null +++ b/vendor/github.com/casbin/casbin/v2/util/util.go @@ -0,0 +1,383 @@ +// Copyright 2017 The casbin Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "encoding/json" + "regexp" + "sort" + "strings" + "sync" +) + +var evalReg = regexp.MustCompile(`\beval\((?P[^)]*)\)`) + +var escapeAssertionRegex = regexp.MustCompile(`\b((r|p)[0-9]*)\.`) + +func JsonToMap(jsonStr string) (map[string]interface{}, error) { + result := make(map[string]interface{}) + err := json.Unmarshal([]byte(jsonStr), &result) + if err != nil { + return result, err + } + return result, nil +} + +// EscapeAssertion escapes the dots in the assertion, because the expression evaluation doesn't support such variable names. 
+func EscapeAssertion(s string) string { + s = escapeAssertionRegex.ReplaceAllStringFunc(s, func(m string) string { + return strings.Replace(m, ".", "_", 1) + }) + return s +} + +// RemoveComments removes the comments starting with # in the text. +func RemoveComments(s string) string { + pos := strings.Index(s, "#") + if pos == -1 { + return s + } + return strings.TrimSpace(s[0:pos]) +} + +// ArrayEquals determines whether two string arrays are identical. +func ArrayEquals(a []string, b []string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +// Array2DEquals determines whether two 2-dimensional string arrays are identical. +func Array2DEquals(a [][]string, b [][]string) bool { + if len(a) != len(b) { + return false + } + + for i, v := range a { + if !ArrayEquals(v, b[i]) { + return false + } + } + return true +} + +// SortArray2D Sorts the two-dimensional string array. +func SortArray2D(arr [][]string) { + if len(arr) != 0 { + sort.Slice(arr, func(i, j int) bool { + elementLen := len(arr[0]) + for k := 0; k < elementLen; k++ { + if arr[i][k] < arr[j][k] { + return true + } else if arr[i][k] > arr[j][k] { + return false + } + } + return true + }) + } +} + +// SortedArray2DEquals determines whether two 2-dimensional string arrays are identical. +func SortedArray2DEquals(a [][]string, b [][]string) bool { + if len(a) != len(b) { + return false + } + copyA := make([][]string, len(a)) + copy(copyA, a) + copyB := make([][]string, len(b)) + copy(copyB, b) + + SortArray2D(copyA) + SortArray2D(copyB) + + for i, v := range copyA { + if !ArrayEquals(v, copyB[i]) { + return false + } + } + return true +} + +// ArrayRemoveDuplicates removes any duplicated elements in a string array. 
+func ArrayRemoveDuplicates(s *[]string) { + found := make(map[string]bool) + j := 0 + for i, x := range *s { + if !found[x] { + found[x] = true + (*s)[j] = (*s)[i] + j++ + } + } + *s = (*s)[:j] +} + +// ArrayToString gets a printable string for a string array. +func ArrayToString(s []string) string { + return strings.Join(s, ", ") +} + +// ParamsToString gets a printable string for variable number of parameters. +func ParamsToString(s ...string) string { + return strings.Join(s, ", ") +} + +// SetEquals determines whether two string sets are identical. +func SetEquals(a []string, b []string) bool { + if len(a) != len(b) { + return false + } + + sort.Strings(a) + sort.Strings(b) + + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +// SetEquals determines whether two int sets are identical. +func SetEqualsInt(a []int, b []int) bool { + if len(a) != len(b) { + return false + } + + sort.Ints(a) + sort.Ints(b) + + for i, v := range a { + if v != b[i] { + return false + } + } + return true +} + +// Set2DEquals determines whether two string slice sets are identical. +func Set2DEquals(a [][]string, b [][]string) bool { + if len(a) != len(b) { + return false + } + + var aa []string + for _, v := range a { + sort.Strings(v) + aa = append(aa, strings.Join(v, ", ")) + } + var bb []string + for _, v := range b { + sort.Strings(v) + bb = append(bb, strings.Join(v, ", ")) + } + + return SetEquals(aa, bb) +} + +// JoinSlice joins a string and a slice into a new slice. +func JoinSlice(a string, b ...string) []string { + res := make([]string, 0, len(b)+1) + + res = append(res, a) + res = append(res, b...) + + return res +} + +// JoinSliceAny joins a string and a slice into a new interface{} slice. 
+func JoinSliceAny(a string, b ...string) []interface{} { + res := make([]interface{}, 0, len(b)+1) + + res = append(res, a) + for _, s := range b { + res = append(res, s) + } + + return res +} + +// SetSubtract returns the elements in `a` that aren't in `b`. +func SetSubtract(a []string, b []string) []string { + mb := make(map[string]struct{}, len(b)) + for _, x := range b { + mb[x] = struct{}{} + } + var diff []string + for _, x := range a { + if _, found := mb[x]; !found { + diff = append(diff, x) + } + } + return diff +} + +// HasEval determine whether matcher contains function eval. +func HasEval(s string) bool { + return evalReg.MatchString(s) +} + +// ReplaceEval replace function eval with the value of its parameters. +func ReplaceEval(s string, rule string) string { + return evalReg.ReplaceAllString(s, "("+rule+")") +} + +// ReplaceEvalWithMap replace function eval with the value of its parameters via given sets. +func ReplaceEvalWithMap(src string, sets map[string]string) string { + return evalReg.ReplaceAllStringFunc(src, func(s string) string { + subs := evalReg.FindStringSubmatch(s) + if subs == nil { + return s + } + key := subs[1] + value, found := sets[key] + if !found { + return s + } + return evalReg.ReplaceAllString(s, value) + }) +} + +// GetEvalValue returns the parameters of function eval. 
+func GetEvalValue(s string) []string { + subMatch := evalReg.FindAllStringSubmatch(s, -1) + var rules []string + for _, rule := range subMatch { + rules = append(rules, rule[1]) + } + return rules +} + +func RemoveDuplicateElement(s []string) []string { + result := make([]string, 0, len(s)) + temp := map[string]struct{}{} + for _, item := range s { + if _, ok := temp[item]; !ok { + temp[item] = struct{}{} + result = append(result, item) + } + } + return result +} + +type node struct { + key interface{} + value interface{} + prev *node + next *node +} + +type LRUCache struct { + capacity int + m map[interface{}]*node + head *node + tail *node +} + +func NewLRUCache(capacity int) *LRUCache { + cache := &LRUCache{} + cache.capacity = capacity + cache.m = map[interface{}]*node{} + + head := &node{} + tail := &node{} + + head.next = tail + tail.prev = head + + cache.head = head + cache.tail = tail + + return cache +} + +func (cache *LRUCache) remove(n *node, listOnly bool) { + if !listOnly { + delete(cache.m, n.key) + } + n.prev.next = n.next + n.next.prev = n.prev +} + +func (cache *LRUCache) add(n *node, listOnly bool) { + if !listOnly { + cache.m[n.key] = n + } + headNext := cache.head.next + cache.head.next = n + headNext.prev = n + n.next = headNext + n.prev = cache.head +} + +func (cache *LRUCache) moveToHead(n *node) { + cache.remove(n, true) + cache.add(n, true) +} + +func (cache *LRUCache) Get(key interface{}) (value interface{}, ok bool) { + n, ok := cache.m[key] + if ok { + cache.moveToHead(n) + return n.value, ok + } else { + return nil, ok + } +} + +func (cache *LRUCache) Put(key interface{}, value interface{}) { + n, ok := cache.m[key] + if ok { + cache.remove(n, false) + } else { + n = &node{key, value, nil, nil} + if len(cache.m) >= cache.capacity { + cache.remove(cache.tail.prev, false) + } + } + cache.add(n, false) +} + +type SyncLRUCache struct { + rwm sync.RWMutex + *LRUCache +} + +func NewSyncLRUCache(capacity int) *SyncLRUCache { + cache := 
&SyncLRUCache{} + cache.LRUCache = NewLRUCache(capacity) + return cache +} + +func (cache *SyncLRUCache) Get(key interface{}) (value interface{}, ok bool) { + cache.rwm.Lock() + defer cache.rwm.Unlock() + return cache.LRUCache.Get(key) +} + +func (cache *SyncLRUCache) Put(key interface{}, value interface{}) { + cache.rwm.Lock() + defer cache.rwm.Unlock() + cache.LRUCache.Put(key, value) +} diff --git a/vendor/github.com/casbin/govaluate/.gitignore b/vendor/github.com/casbin/govaluate/.gitignore new file mode 100644 index 000000000..da210fb31 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/.gitignore @@ -0,0 +1,28 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +coverage.out + +manual_test.go +*.out +*.err diff --git a/vendor/github.com/casbin/govaluate/.releaserc.json b/vendor/github.com/casbin/govaluate/.releaserc.json new file mode 100644 index 000000000..58cb0bb4c --- /dev/null +++ b/vendor/github.com/casbin/govaluate/.releaserc.json @@ -0,0 +1,16 @@ +{ + "debug": true, + "branches": [ + "+([0-9])?(.{+([0-9]),x}).x", + "master", + { + "name": "beta", + "prerelease": true + } + ], + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + "@semantic-release/github" + ] +} diff --git a/vendor/github.com/casbin/govaluate/CONTRIBUTORS b/vendor/github.com/casbin/govaluate/CONTRIBUTORS new file mode 100644 index 000000000..c1a7fe42d --- /dev/null +++ b/vendor/github.com/casbin/govaluate/CONTRIBUTORS @@ -0,0 +1,15 @@ +This library was authored by George Lester, and contains contributions from: + +vjeantet (regex support) +iasci (ternary operator) +oxtoacart (parameter structures, deferred parameter retrieval) +wmiller848 (bitwise operators) +prashantv (optimization of bools) +dpaolella 
(exposure of variables used in an expression) +benpaxton (fix for missing type checks during literal elide process) +abrander (panic-finding testing tool, float32 conversions) +xfennec (fix for dates being parsed in the current Location) +bgaifullin (lifting restriction on complex/struct types) +gautambt (hexadecimal literals) +felixonmars (fix multiple typos in test names) +sambonfire (automatic type conversion for accessor function calls) \ No newline at end of file diff --git a/vendor/github.com/casbin/govaluate/EvaluableExpression.go b/vendor/github.com/casbin/govaluate/EvaluableExpression.go new file mode 100644 index 000000000..a5fe50d47 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/EvaluableExpression.go @@ -0,0 +1,276 @@ +package govaluate + +import ( + "errors" + "fmt" +) + +const isoDateFormat string = "2006-01-02T15:04:05.999999999Z0700" +const shortCircuitHolder int = -1 + +var DUMMY_PARAMETERS = MapParameters(map[string]interface{}{}) + +/* + EvaluableExpression represents a set of ExpressionTokens which, taken together, + are an expression that can be evaluated down into a single value. +*/ +type EvaluableExpression struct { + + /* + Represents the query format used to output dates. Typically only used when creating SQL or Mongo queries from an expression. + Defaults to the complete ISO8601 format, including nanoseconds. + */ + QueryDateFormat string + + /* + Whether or not to safely check types when evaluating. + If true, this library will return error messages when invalid types are used. + If false, the library will panic when operators encounter types they can't use. + + This is exclusively for users who need to squeeze every ounce of speed out of the library as they can, + and you should only set this to false if you know exactly what you're doing. 
+ */ + ChecksTypes bool + + tokens []ExpressionToken + evaluationStages *evaluationStage + inputExpression string +} + +/* + Parses a new EvaluableExpression from the given [expression] string. + Returns an error if the given expression has invalid syntax. +*/ +func NewEvaluableExpression(expression string) (*EvaluableExpression, error) { + + functions := make(map[string]ExpressionFunction) + return NewEvaluableExpressionWithFunctions(expression, functions) +} + +/* + Similar to [NewEvaluableExpression], except that instead of a string, an already-tokenized expression is given. + This is useful in cases where you may be generating an expression automatically, or using some other parser (e.g., to parse from a query language) +*/ +func NewEvaluableExpressionFromTokens(tokens []ExpressionToken) (*EvaluableExpression, error) { + + var ret *EvaluableExpression + var err error + + ret = new(EvaluableExpression) + ret.QueryDateFormat = isoDateFormat + + err = checkBalance(tokens) + if err != nil { + return nil, err + } + + err = checkExpressionSyntax(tokens) + if err != nil { + return nil, err + } + + ret.tokens, err = optimizeTokens(tokens) + if err != nil { + return nil, err + } + + ret.evaluationStages, err = planStages(ret.tokens) + if err != nil { + return nil, err + } + + ret.ChecksTypes = true + return ret, nil +} + +/* + Similar to [NewEvaluableExpression], except enables the use of user-defined functions. + Functions passed into this will be available to the expression. 
+*/ +func NewEvaluableExpressionWithFunctions(expression string, functions map[string]ExpressionFunction) (*EvaluableExpression, error) { + + var ret *EvaluableExpression + var err error + + ret = new(EvaluableExpression) + ret.QueryDateFormat = isoDateFormat + ret.inputExpression = expression + + ret.tokens, err = parseTokens(expression, functions) + if err != nil { + return nil, err + } + + err = checkBalance(ret.tokens) + if err != nil { + return nil, err + } + + err = checkExpressionSyntax(ret.tokens) + if err != nil { + return nil, err + } + + ret.tokens, err = optimizeTokens(ret.tokens) + if err != nil { + return nil, err + } + + ret.evaluationStages, err = planStages(ret.tokens) + if err != nil { + return nil, err + } + + ret.ChecksTypes = true + return ret, nil +} + +/* + Same as `Eval`, but automatically wraps a map of parameters into a `govalute.Parameters` structure. +*/ +func (this EvaluableExpression) Evaluate(parameters map[string]interface{}) (interface{}, error) { + + if parameters == nil { + return this.Eval(nil) + } + + return this.Eval(MapParameters(parameters)) +} + +/* + Runs the entire expression using the given [parameters]. + e.g., If the expression contains a reference to the variable "foo", it will be taken from `parameters.Get("foo")`. + + This function returns errors if the combination of expression and parameters cannot be run, + such as if a variable in the expression is not present in [parameters]. + + In all non-error circumstances, this returns the single value result of the expression and parameters given. + e.g., if the expression is "1 + 1", this will return 2.0. 
+ e.g., if the expression is "foo + 1" and parameters contains "foo" = 2, this will return 3.0 +*/ +func (this EvaluableExpression) Eval(parameters Parameters) (interface{}, error) { + + if this.evaluationStages == nil { + return nil, nil + } + + if parameters != nil { + parameters = &sanitizedParameters{parameters} + } else { + parameters = DUMMY_PARAMETERS + } + + return this.evaluateStage(this.evaluationStages, parameters) +} + +func (this EvaluableExpression) evaluateStage(stage *evaluationStage, parameters Parameters) (interface{}, error) { + + var left, right interface{} + var err error + + if stage.leftStage != nil { + left, err = this.evaluateStage(stage.leftStage, parameters) + if err != nil { + return nil, err + } + } + + if stage.isShortCircuitable() { + switch stage.symbol { + case AND: + if left == false { + return false, nil + } + case OR: + if left == true { + return true, nil + } + case COALESCE: + if left != nil { + return left, nil + } + + case TERNARY_TRUE: + if left == false { + right = shortCircuitHolder + } + case TERNARY_FALSE: + if left != nil { + right = shortCircuitHolder + } + } + } + + if right != shortCircuitHolder && stage.rightStage != nil { + right, err = this.evaluateStage(stage.rightStage, parameters) + if err != nil { + return nil, err + } + } + + if this.ChecksTypes { + if stage.typeCheck == nil { + + err = typeCheck(stage.leftTypeCheck, left, stage.symbol, stage.typeErrorFormat) + if err != nil { + return nil, err + } + + err = typeCheck(stage.rightTypeCheck, right, stage.symbol, stage.typeErrorFormat) + if err != nil { + return nil, err + } + } else { + // special case where the type check needs to know both sides to determine if the operator can handle it + if !stage.typeCheck(left, right) { + errorMsg := fmt.Sprintf(stage.typeErrorFormat, left, stage.symbol.String()) + return nil, errors.New(errorMsg) + } + } + } + + return stage.operator(left, right, parameters) +} + +func typeCheck(check stageTypeCheck, value interface{}, 
symbol OperatorSymbol, format string) error { + + if check == nil { + return nil + } + + if check(value) { + return nil + } + + errorMsg := fmt.Sprintf(format, value, symbol.String()) + return errors.New(errorMsg) +} + +/* + Returns an array representing the ExpressionTokens that make up this expression. +*/ +func (this EvaluableExpression) Tokens() []ExpressionToken { + + return this.tokens +} + +/* + Returns the original expression used to create this EvaluableExpression. +*/ +func (this EvaluableExpression) String() string { + + return this.inputExpression +} + +/* + Returns an array representing the variables contained in this EvaluableExpression. +*/ +func (this EvaluableExpression) Vars() []string { + var varlist []string + for _, val := range this.Tokens() { + if val.Kind == VARIABLE { + varlist = append(varlist, val.Value.(string)) + } + } + return varlist +} diff --git a/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go b/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go new file mode 100644 index 000000000..52409fa24 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/EvaluableExpression_sql.go @@ -0,0 +1,167 @@ +package govaluate + +import ( + "errors" + "fmt" + "regexp" + "time" +) + +/* +Returns a string representing this expression as if it were written in SQL. +This function assumes that all parameters exist within the same table, and that the table essentially represents +a serialized object of some sort (e.g., hibernate). +If your data model is more normalized, you may need to consider iterating through each actual token given by `Tokens()` +to create your query. + +Boolean values are considered to be "1" for true, "0" for false. + +Times are formatted according to this.QueryDateFormat. 
+*/ +func (this EvaluableExpression) ToSQLQuery() (string, error) { + + var stream *tokenStream + var transactions *expressionOutputStream + var transaction string + var err error + + stream = newTokenStream(this.tokens) + transactions = new(expressionOutputStream) + + for stream.hasNext() { + + transaction, err = this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + transactions.add(transaction) + } + + return transactions.createString(" "), nil +} + +func (this EvaluableExpression) findNextSQLString(stream *tokenStream, transactions *expressionOutputStream) (string, error) { + + var token ExpressionToken + var ret string + + token = stream.next() + + switch token.Kind { + + case STRING: + ret = fmt.Sprintf("'%v'", token.Value) + case PATTERN: + ret = fmt.Sprintf("'%s'", token.Value.(*regexp.Regexp).String()) + case TIME: + ret = fmt.Sprintf("'%s'", token.Value.(time.Time).Format(this.QueryDateFormat)) + + case LOGICALOP: + switch logicalSymbols[token.Value.(string)] { + + case AND: + ret = "AND" + case OR: + ret = "OR" + } + + case BOOLEAN: + if token.Value.(bool) { + ret = "1" + } else { + ret = "0" + } + + case VARIABLE: + ret = fmt.Sprintf("[%s]", token.Value.(string)) + + case NUMERIC: + ret = fmt.Sprintf("%g", token.Value.(float64)) + + case COMPARATOR: + switch comparatorSymbols[token.Value.(string)] { + + case EQ: + ret = "=" + case NEQ: + ret = "<>" + case REQ: + ret = "RLIKE" + case NREQ: + ret = "NOT RLIKE" + default: + ret = fmt.Sprintf("%s", token.Value) + } + + case TERNARY: + + switch ternarySymbols[token.Value.(string)] { + + case COALESCE: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("COALESCE(%v, %v)", left, right) + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + return "", errors.New("Ternary operators are unsupported in SQL output") + } + case PREFIX: + switch 
prefixSymbols[token.Value.(string)] { + + case INVERT: + ret = "NOT" + default: + + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("%s%s", token.Value.(string), right) + } + case MODIFIER: + + switch modifierSymbols[token.Value.(string)] { + + case EXPONENT: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("POW(%s, %s)", left, right) + case MODULUS: + + left := transactions.rollback() + right, err := this.findNextSQLString(stream, transactions) + if err != nil { + return "", err + } + + ret = fmt.Sprintf("MOD(%s, %s)", left, right) + default: + ret = fmt.Sprintf("%s", token.Value) + } + case CLAUSE: + ret = "(" + case CLAUSE_CLOSE: + ret = ")" + case SEPARATOR: + ret = "," + + default: + errorMsg := fmt.Sprintf("Unrecognized query token '%s' of kind '%s'", token.Value, token.Kind) + return "", errors.New(errorMsg) + } + + return ret, nil +} diff --git a/vendor/github.com/casbin/govaluate/ExpressionToken.go b/vendor/github.com/casbin/govaluate/ExpressionToken.go new file mode 100644 index 000000000..f849f3813 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/ExpressionToken.go @@ -0,0 +1,9 @@ +package govaluate + +/* + Represents a single parsed token. 
+*/ +type ExpressionToken struct { + Kind TokenKind + Value interface{} +} diff --git a/vendor/github.com/casbin/govaluate/LICENSE b/vendor/github.com/casbin/govaluate/LICENSE new file mode 100644 index 000000000..24b9b4591 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2016 George Lester + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/casbin/govaluate/MANUAL.md b/vendor/github.com/casbin/govaluate/MANUAL.md new file mode 100644 index 000000000..e06582851 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/MANUAL.md @@ -0,0 +1,176 @@ +govaluate +==== + +This library contains quite a lot of functionality, this document is meant to be formal documentation on the operators and features of it. +Some of this documentation may duplicate what's in README.md, but should never conflict. 
+ +# Types + +This library only officially deals with four types; `float64`, `bool`, `string`, and arrays. + +All numeric literals, with or without a radix, will be converted to `float64` for evaluation. For instance; in practice, there is no difference between the literals "1.0" and "1", they both end up as `float64`. This matters to users because if you intend to return numeric values from your expressions, then the returned value will be `float64`, not any other numeric type. + +Any string _literal_ (not parameter) which is interpretable as a date will be converted to a `float64` representation of that date's unix time. Any `time.Time` parameters will not be operable with these date literals; such parameters will need to use the `time.Time.Unix()` method to get a numeric representation. + +Arrays are untyped, and can be mixed-type. Internally they're all just `interface{}`. Only two operators can interact with arrays, `IN` and `,`. All other operators will refuse to operate on arrays. + +# Operators + +## Modifiers + +### Addition, concatenation `+` + +If either left or right sides of the `+` operator are a `string`, then this operator will perform string concatenation and return that result. If neither are string, then both must be numeric, and this will return a numeric result. + +Any other case is invalid. + +### Arithmetic `-` `*` `/` `**` `%` + +`**` refers to "take to the power of". For instance, `3 ** 4` == 81. + +* _Left side_: numeric +* _Right side_: numeric +* _Returns_: numeric + +### Bitwise shifts, masks `>>` `<<` `|` `&` `^` + +All of these operators convert their `float64` left and right sides to `int64`, perform their operation, and then convert back. +Given how this library assumes numeric are represented (as `float64`), it is unlikely that this behavior will change, even though it may cause havoc with extremely large or small numbers. + +* _Left side_: numeric +* _Right side_: numeric +* _Returns_: numeric + +### Negation `-` + +Prefix only. 
This can never have a left-hand value. + +* _Right side_: numeric +* _Returns_: numeric + +### Inversion `!` + +Prefix only. This can never have a left-hand value. + +* _Right side_: bool +* _Returns_: bool + +### Bitwise NOT `~` + +Prefix only. This can never have a left-hand value. + +* _Right side_: numeric +* _Returns_: numeric + +## Logical Operators + +For all logical operators, this library will short-circuit the operation if the left-hand side is sufficient to determine what to do. For instance, `true || expensiveOperation()` will not actually call `expensiveOperation()`, since it knows the left-hand side is `true`. + +### Logical AND/OR `&&` `||` + +* _Left side_: bool +* _Right side_: bool +* _Returns_: bool + +### Ternary true `?` + +Checks if the left side is `true`. If so, returns the right side. If the left side is `false`, returns `nil`. +In practice, this is commonly used with the other ternary operator. + +* _Left side_: bool +* _Right side_: Any type. +* _Returns_: Right side or `nil` + +### Ternary false `:` + +Checks if the left side is `nil`. If so, returns the right side. If the left side is non-nil, returns the left side. +In practice, this is commonly used with the other ternary operator. + +* _Left side_: Any type. +* _Right side_: Any type. +* _Returns_: Right side or `nil` + +### Null coalescence `??` + +Similar to the C# operator. If the left value is non-nil, it returns that. If not, then the right-value is returned. + +* _Left side_: Any type. +* _Right side_: Any type. +* _Returns_: No specific type - whichever is passed to it. + +## Comparators + +### Numeric/lexicographic comparators `>` `<` `>=` `<=` + +If both sides are numeric, this returns the usual greater/lesser behavior that would be expected. +If both sides are string, this returns the lexicographic comparison of the strings. This uses Go's standard lexicographic compare. + +* _Accepts_: Left and right side must either be both string, or both numeric. 
+* _Returns_: bool + +### Regex comparators `=~` `!~` + +These use go's standard `regexp` flavor of regex. The left side is expected to be the candidate string, the right side is the pattern. `=~` returns whether or not the candidate string matches the regex pattern given on the right. `!~` is the inverted version of the same logic. + +* _Left side_: string +* _Right side_: string +* _Returns_: bool + +## Arrays + +### Separator `,` + +The separator, always paired with parenthesis, creates arrays. It must always have both a left and right-hand value, so for instance `(, 0)` and `(0,)` are invalid uses of it. + +Again, this should always be used with parenthesis; like `(1, 2, 3, 4)`. + +### Membership `IN` + +The only operator with a text name, this operator checks the right-hand side array to see if it contains a value that is equal to the left-side value. +Equality is determined by the use of the `==` operator, and this library doesn't check types between the values. Any two values, when cast to `interface{}`, and can still be checked for equality with `==` will act as expected. + +Note that you can use a parameter for the array, but it must be an `[]interface{}`. + +* _Left side_: Any type. +* _Right side_: array +* _Returns_: bool + +# Parameters + +Parameters must be passed in every time the expression is evaluated. Parameters can be of any type, but will not cause errors unless actually used in an erroneous way. There is no difference in behavior for any of the above operators for parameters - they are type checked when used. + +All `int` and `float` values of any width will be converted to `float64` before use. + +At no point is the parameter structure, or any value thereof, modified by this library. + +## Alternates to maps + +The default form of parameters as a map may not serve your use case. 
You may have parameters in some other structure, you may want to change the no-parameter-found behavior, or maybe even just have some debugging print statements invoked when a parameter is accessed. + +To do this, define a type that implements the `govaluate.Parameters` interface. When you want to evaluate, instead call `EvaluableExpression.Eval` and pass your parameter structure. + +# Functions + +During expression parsing (_not_ evaluation), a map of functions can be given to `govaluate.NewEvaluableExpressionWithFunctions` (the lengthiest and finest of function names). The resultant expression will be able to invoke those functions during evaluation. Once parsed, an expression cannot have functions added or removed - a new expression will need to be created if you want to change the functions, or behavior of said functions. + +Functions always take the form `()`, including parens. Functions can have an empty list of parameters, like `()`, but still must have parens. + +If the expression contains something that looks like it ought to be a function (such as `foo()`), but no such function was given to it, it will error on parsing. + +Functions must be of type `map[string]govaluate.ExpressionFunction`. `ExpressionFunction`, for brevity, has the following signature: + +`func(args ...interface{}) (interface{}, error)` + +Where `args` is whatever is passed to the function when called. If a non-nil error is returned from a function during evaluation, the evaluation stops and ultimately returns that error to the caller of `Evaluate()` or `Eval()`. + +## Built-in functions + +There aren't any builtin functions. The author is opposed to maintaining a standard library of functions to be used. + +Every use case of this library is different, and even in simple use cases (such as parameters, see above) different users need different behavior, naming, or even functionality. 
The author prefers that users make their own decisions about what functions they need, and how they operate. + +# Equality + +The `==` and `!=` operators involve a moderately complex workflow. They use [`reflect.DeepEqual`](https://golang.org/pkg/reflect/#DeepEqual). This is for complicated reasons, but there are some types in Go that cannot be compared with the native `==` operator. Arrays, in particular, cannot be compared - Go will panic if you try. One might assume this could be handled with the type checking system in `govaluate`, but unfortunately without reflection there is no way to know if a variable is a slice/array. Worse, structs can be incomparable if they _contain incomparable types_. + +It's all very complicated. Fortunately, Go includes the `reflect.DeepEqual` function to handle all the edge cases. Currently, `govaluate` uses that for all equality/inequality. diff --git a/vendor/github.com/casbin/govaluate/OperatorSymbol.go b/vendor/github.com/casbin/govaluate/OperatorSymbol.go new file mode 100644 index 000000000..4b810658b --- /dev/null +++ b/vendor/github.com/casbin/govaluate/OperatorSymbol.go @@ -0,0 +1,309 @@ +package govaluate + +/* + Represents the valid symbols for operators. 
+ +*/ +type OperatorSymbol int + +const ( + VALUE OperatorSymbol = iota + LITERAL + NOOP + EQ + NEQ + GT + LT + GTE + LTE + REQ + NREQ + IN + + AND + OR + + PLUS + MINUS + BITWISE_AND + BITWISE_OR + BITWISE_XOR + BITWISE_LSHIFT + BITWISE_RSHIFT + MULTIPLY + DIVIDE + MODULUS + EXPONENT + + NEGATE + INVERT + BITWISE_NOT + + TERNARY_TRUE + TERNARY_FALSE + COALESCE + + FUNCTIONAL + ACCESS + SEPARATE +) + +type operatorPrecedence int + +const ( + noopPrecedence operatorPrecedence = iota + valuePrecedence + functionalPrecedence + prefixPrecedence + exponentialPrecedence + additivePrecedence + bitwisePrecedence + bitwiseShiftPrecedence + multiplicativePrecedence + comparatorPrecedence + ternaryPrecedence + logicalAndPrecedence + logicalOrPrecedence + separatePrecedence +) + +func findOperatorPrecedenceForSymbol(symbol OperatorSymbol) operatorPrecedence { + + switch symbol { + case NOOP: + return noopPrecedence + case VALUE: + return valuePrecedence + case EQ: + fallthrough + case NEQ: + fallthrough + case GT: + fallthrough + case LT: + fallthrough + case GTE: + fallthrough + case LTE: + fallthrough + case REQ: + fallthrough + case NREQ: + fallthrough + case IN: + return comparatorPrecedence + case AND: + return logicalAndPrecedence + case OR: + return logicalOrPrecedence + case BITWISE_AND: + fallthrough + case BITWISE_OR: + fallthrough + case BITWISE_XOR: + return bitwisePrecedence + case BITWISE_LSHIFT: + fallthrough + case BITWISE_RSHIFT: + return bitwiseShiftPrecedence + case PLUS: + fallthrough + case MINUS: + return additivePrecedence + case MULTIPLY: + fallthrough + case DIVIDE: + fallthrough + case MODULUS: + return multiplicativePrecedence + case EXPONENT: + return exponentialPrecedence + case BITWISE_NOT: + fallthrough + case NEGATE: + fallthrough + case INVERT: + return prefixPrecedence + case COALESCE: + fallthrough + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + return ternaryPrecedence + case ACCESS: + fallthrough + case FUNCTIONAL: + return 
functionalPrecedence + case SEPARATE: + return separatePrecedence + } + + return valuePrecedence +} + +/* + Map of all valid comparators, and their string equivalents. + Used during parsing of expressions to determine if a symbol is, in fact, a comparator. + Also used during evaluation to determine exactly which comparator is being used. +*/ +var comparatorSymbols = map[string]OperatorSymbol{ + "==": EQ, + "!=": NEQ, + ">": GT, + ">=": GTE, + "<": LT, + "<=": LTE, + "=~": REQ, + "!~": NREQ, + "in": IN, +} + +var logicalSymbols = map[string]OperatorSymbol{ + "&&": AND, + "||": OR, +} + +var bitwiseSymbols = map[string]OperatorSymbol{ + "^": BITWISE_XOR, + "&": BITWISE_AND, + "|": BITWISE_OR, +} + +var bitwiseShiftSymbols = map[string]OperatorSymbol{ + ">>": BITWISE_RSHIFT, + "<<": BITWISE_LSHIFT, +} + +var additiveSymbols = map[string]OperatorSymbol{ + "+": PLUS, + "-": MINUS, +} + +var multiplicativeSymbols = map[string]OperatorSymbol{ + "*": MULTIPLY, + "/": DIVIDE, + "%": MODULUS, +} + +var exponentialSymbolsS = map[string]OperatorSymbol{ + "**": EXPONENT, +} + +var prefixSymbols = map[string]OperatorSymbol{ + "-": NEGATE, + "!": INVERT, + "~": BITWISE_NOT, +} + +var ternarySymbols = map[string]OperatorSymbol{ + "?": TERNARY_TRUE, + ":": TERNARY_FALSE, + "??": COALESCE, +} + +// this is defined separately from additiveSymbols et al because it's needed for parsing, not stage planning. +var modifierSymbols = map[string]OperatorSymbol{ + "+": PLUS, + "-": MINUS, + "*": MULTIPLY, + "/": DIVIDE, + "%": MODULUS, + "**": EXPONENT, + "&": BITWISE_AND, + "|": BITWISE_OR, + "^": BITWISE_XOR, + ">>": BITWISE_RSHIFT, + "<<": BITWISE_LSHIFT, +} + +var separatorSymbols = map[string]OperatorSymbol{ + ",": SEPARATE, +} + +/* + Returns true if this operator is contained by the given array of candidate symbols. + False otherwise. 
+*/ +func (this OperatorSymbol) IsModifierType(candidate []OperatorSymbol) bool { + + for _, symbolType := range candidate { + if this == symbolType { + return true + } + } + + return false +} + +/* + Generally used when formatting type check errors. + We could store the stringified symbol somewhere else and not require a duplicated codeblock to translate + OperatorSymbol to string, but that would require more memory, and another field somewhere. + Adding operators is rare enough that we just stringify it here instead. +*/ +func (this OperatorSymbol) String() string { + + switch this { + case NOOP: + return "NOOP" + case VALUE: + return "VALUE" + case EQ: + return "=" + case NEQ: + return "!=" + case GT: + return ">" + case LT: + return "<" + case GTE: + return ">=" + case LTE: + return "<=" + case REQ: + return "=~" + case NREQ: + return "!~" + case AND: + return "&&" + case OR: + return "||" + case IN: + return "in" + case BITWISE_AND: + return "&" + case BITWISE_OR: + return "|" + case BITWISE_XOR: + return "^" + case BITWISE_LSHIFT: + return "<<" + case BITWISE_RSHIFT: + return ">>" + case PLUS: + return "+" + case MINUS: + return "-" + case MULTIPLY: + return "*" + case DIVIDE: + return "/" + case MODULUS: + return "%" + case EXPONENT: + return "**" + case NEGATE: + return "-" + case INVERT: + return "!" + case BITWISE_NOT: + return "~" + case TERNARY_TRUE: + return "?" + case TERNARY_FALSE: + return ":" + case COALESCE: + return "??" 
+ } + return "" +} diff --git a/vendor/github.com/casbin/govaluate/README.md b/vendor/github.com/casbin/govaluate/README.md new file mode 100644 index 000000000..576a9df19 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/README.md @@ -0,0 +1,232 @@ +govaluate +==== + +[![Build Status](https://github.com/casbin/govaluate/actions/workflows/build.yml/badge.svg)](https://github.com/casbin/govaluate/actions/workflows/build.yml) +[![Godoc](https://godoc.org/github.com/casbin/govaluate?status.svg)](https://pkg.go.dev/github.com/casbin/govaluate) +[![Go Report Card](https://goreportcard.com/badge/github.com/casbin/govaluate)](https://goreportcard.com/report/github.com/casbin/govaluate) + +Provides support for evaluating arbitrary C-like artithmetic/string expressions. + +Why can't you just write these expressions in code? +-- + +Sometimes, you can't know ahead-of-time what an expression will look like, or you want those expressions to be configurable. +Perhaps you've got a set of data running through your application, and you want to allow your users to specify some validations to run on it before committing it to a database. Or maybe you've written a monitoring framework which is capable of gathering a bunch of metrics, then evaluating a few expressions to see if any metrics should be alerted upon, but the conditions for alerting are different for each monitor. + +A lot of people wind up writing their own half-baked style of evaluation language that fits their needs, but isn't complete. Or they wind up baking the expression into the actual executable, even if they know it's subject to change. These strategies may work, but they take time to implement, time for users to learn, and induce technical debt as requirements change. This library is meant to cover all the normal C-like expressions, so that you don't have to reinvent one of the oldest wheels on a computer. + +How do I use it? +-- + +You create a new EvaluableExpression, then call "Evaluate" on it. 
+ +```go + expression, err := govaluate.NewEvaluableExpression("10 > 0"); + result, err := expression.Evaluate(nil); + // result is now set to "true", the bool value. +``` + +Cool, but how about with parameters? + +```go + expression, err := govaluate.NewEvaluableExpression("foo > 0"); + + parameters := make(map[string]interface{}, 8) + parameters["foo"] = -1; + + result, err := expression.Evaluate(parameters); + // result is now set to "false", the bool value. +``` + +That's cool, but we can almost certainly have done all that in code. What about a complex use case that involves some math? + +```go + expression, err := govaluate.NewEvaluableExpression("(requests_made * requests_succeeded / 100) >= 90"); + + parameters := make(map[string]interface{}, 8) + parameters["requests_made"] = 100; + parameters["requests_succeeded"] = 80; + + result, err := expression.Evaluate(parameters); + // result is now set to "false", the bool value. +``` + +Or maybe you want to check the status of an alive check ("smoketest") page, which will be a string? + +```go + expression, err := govaluate.NewEvaluableExpression("http_response_body == 'service is ok'"); + + parameters := make(map[string]interface{}, 8) + parameters["http_response_body"] = "service is ok"; + + result, err := expression.Evaluate(parameters); + // result is now set to "true", the bool value. +``` + +These examples have all returned boolean values, but it's equally possible to return numeric ones. + +```go + expression, err := govaluate.NewEvaluableExpression("(mem_used / total_mem) * 100"); + + parameters := make(map[string]interface{}, 8) + parameters["total_mem"] = 1024; + parameters["mem_used"] = 512; + + result, err := expression.Evaluate(parameters); + // result is now set to "50.0", the float64 value. +``` + +You can also do date parsing, though the formats are somewhat limited. Stick to RF3339, ISO8061, unix date, or ruby date formats. 
If you're having trouble getting a date string to parse, check the list of formats actually used: [parsing.go:248](https://github.com/casbin/govaluate/blob/0580e9b47a69125afa0e4ebd1cf93c49eb5a43ec/parsing.go#L258). + +```go + expression, err := govaluate.NewEvaluableExpression("'2014-01-02' > '2014-01-01 23:59:59'"); + result, err := expression.Evaluate(nil); + + // result is now set to true +``` + +Expressions are parsed once, and can be re-used multiple times. Parsing is the compute-intensive phase of the process, so if you intend to use the same expression with different parameters, just parse it once. Like so; + +```go + expression, err := govaluate.NewEvaluableExpression("response_time <= 100"); + parameters := make(map[string]interface{}, 8) + + for { + parameters["response_time"] = pingSomething(); + result, err := expression.Evaluate(parameters) + } +``` + +The normal C-standard order of operators is respected. When writing an expression, be sure that you either order the operators correctly, or use parenthesis to clarify which portions of an expression should be run first. + +Escaping characters +-- + +Sometimes you'll have parameters that have spaces, slashes, pluses, ampersands or some other character +that this library interprets as something special. For example, the following expression will not +act as one might expect: + + "response-time < 100" + +As written, the library will parse it as "[response] minus [time] is less than 100". In reality, +"response-time" is meant to be one variable that just happens to have a dash in it. + +There are two ways to work around this. First, you can escape the entire parameter name: + + "[response-time] < 100" + +Or you can use backslashes to escape only the minus sign. + + "response\\-time < 100" + +Backslashes can be used anywhere in an expression to escape the very next character. Square bracketed parameter names can be used instead of plain parameter names at any time. 
+ +Functions +-- + +You may have cases where you want to call a function on a parameter during execution of the expression. Perhaps you want to aggregate some set of data, but don't know the exact aggregation you want to use until you're writing the expression itself. Or maybe you have a mathematical operation you want to perform, for which there is no operator; like `log` or `tan` or `sqrt`. For cases like this, you can provide a map of functions to `NewEvaluableExpressionWithFunctions`, which will then be able to use them during execution. For instance; + +```go + functions := map[string]govaluate.ExpressionFunction { + "strlen": func(args ...interface{}) (interface{}, error) { + length := len(args[0].(string)) + return (float64)(length), nil + }, + } + + expString := "strlen('someReallyLongInputString') <= 16" + expression, _ := govaluate.NewEvaluableExpressionWithFunctions(expString, functions) + + result, _ := expression.Evaluate(nil) + // result is now "false", the boolean value +``` + +Functions can accept any number of arguments, correctly handles nested functions, and arguments can be of any type (even if none of this library's operators support evaluation of that type). For instance, each of these usages of functions in an expression are valid (assuming that the appropriate functions and parameters are given): + +```go +"sqrt(x1 ** y1, x2 ** y2)" +"max(someValue, abs(anotherValue), 10 * lastValue)" +``` + +Functions cannot be passed as parameters, they must be known at the time when the expression is parsed, and are unchangeable after parsing. + +Accessors +-- + +If you have structs in your parameters, you can access their fields and methods in the usual way. For instance, given a struct that has a method "Echo", present in the parameters as `foo`, the following is valid: + + "foo.Echo('hello world')" + +Fields are accessed in a similar way. 
Assuming `foo` has a field called "Length": + + "foo.Length > 9000" + +The values of a `map` are accessed in the same way. Assuming the parameter `foo` is `map[string]int{ "bar": 1 }` + + "foo.bar == 1" + +Accessors can be nested to any depth, like the following + + "foo.Bar.Baz.SomeFunction()" + +This may be convenient, but note that using accessors involves a _lot_ of reflection. This makes the expression about four times slower than just using a parameter (consult the benchmarks for more precise measurements on your system). +If at all reasonable, the author recommends extracting the values you care about into a parameter map beforehand, or defining a struct that implements the `Parameters` interface, and which grabs fields as required. If there are functions you want to use, it's better to pass them as expression functions (see the above section). These approaches use no reflection, and are designed to be fast and clean. + +What operators and types does this support? +-- + +* Modifiers: `+` `-` `/` `*` `&` `|` `^` `**` `%` `>>` `<<` +* Comparators: `>` `>=` `<` `<=` `==` `!=` `=~` `!~` +* Logical ops: `||` `&&` +* Numeric constants, as 64-bit floating point (`12345.678`) +* String constants (single quotes: `'foobar'`) +* Date constants (single quotes, using any permutation of RFC3339, ISO8601, ruby date, or unix date; date parsing is automatically tried with any string constant) +* Boolean constants: `true` `false` +* Parenthesis to control order of evaluation `(` `)` +* Arrays (anything separated by `,` within parenthesis: `(1, 2, 'foo')`) +* Prefixes: `!` `-` `~` +* Ternary conditional: `?` `:` +* Null coalescence: `??` + +See [MANUAL.md](https://github.com/casbin/govaluate/blob/master/MANUAL.md) for exacting details on what types each operator supports. + +Types +-- + +Some operators don't make sense when used with some types. For instance, what does it mean to get the modulo of a string? 
What happens if you check to see if two numbers are logically AND'ed together? + +Everyone has a different intuition about the answers to these questions. To prevent confusion, this library will _refuse to operate_ upon types for which there is not an unambiguous meaning for the operation. See [MANUAL.md](https://github.com/casbin/govaluate/blob/master/MANUAL.md) for details about what operators are valid for which types. + +Benchmarks +-- + +If you're concerned about the overhead of this library, a good range of benchmarks are built into this repo. You can run them with `go test -bench=.`. The library is built with an eye towards being quick, but has not been aggressively profiled and optimized. For most applications, though, it is completely fine. + +For a very rough idea of performance, here are the results output from a benchmark run on a 3rd-gen Macbook Pro (Linux Mint 17.1). + +``` +BenchmarkSingleParse-12 1000000 1382 ns/op +BenchmarkSimpleParse-12 200000 10771 ns/op +BenchmarkFullParse-12 30000 49383 ns/op +BenchmarkEvaluationSingle-12 50000000 30.1 ns/op +BenchmarkEvaluationNumericLiteral-12 10000000 119 ns/op +BenchmarkEvaluationLiteralModifiers-12 10000000 236 ns/op +BenchmarkEvaluationParameters-12 5000000 260 ns/op +BenchmarkEvaluationParametersModifiers-12 3000000 547 ns/op +BenchmarkComplexExpression-12 2000000 963 ns/op +BenchmarkRegexExpression-12 100000 20357 ns/op +BenchmarkConstantRegexExpression-12 1000000 1392 ns/op +ok +``` + +API Breaks +-- + +While this library has very few cases which will ever result in an API break, it can happen. If you are using this in production, vendor the commit you've tested against, or use gopkg.in to redirect your import (e.g., `import "gopkg.in/casbin/govaluate.v1"`). Master branch (while infrequent) _may_ at some point contain API breaking changes, and the author will have no way to communicate these to downstreams, other than creating a new major release. 
+ +Releases will explicitly state when an API break happens, and if they do not specify an API break it should be safe to upgrade. + +License +-- + +This project is licensed under the MIT general use license. You're free to integrate, fork, and play with this code as you feel fit without consulting the author, as long as you provide proper credit to the author in your works. diff --git a/vendor/github.com/casbin/govaluate/TokenKind.go b/vendor/github.com/casbin/govaluate/TokenKind.go new file mode 100644 index 000000000..7c9516d2d --- /dev/null +++ b/vendor/github.com/casbin/govaluate/TokenKind.go @@ -0,0 +1,75 @@ +package govaluate + +/* + Represents all valid types of tokens that a token can be. +*/ +type TokenKind int + +const ( + UNKNOWN TokenKind = iota + + PREFIX + NUMERIC + BOOLEAN + STRING + PATTERN + TIME + VARIABLE + FUNCTION + SEPARATOR + ACCESSOR + + COMPARATOR + LOGICALOP + MODIFIER + + CLAUSE + CLAUSE_CLOSE + + TERNARY +) + +/* + GetTokenKindString returns a string that describes the given TokenKind. + e.g., when passed the NUMERIC TokenKind, this returns the string "NUMERIC". 
+*/ +func (kind TokenKind) String() string { + + switch kind { + + case PREFIX: + return "PREFIX" + case NUMERIC: + return "NUMERIC" + case BOOLEAN: + return "BOOLEAN" + case STRING: + return "STRING" + case PATTERN: + return "PATTERN" + case TIME: + return "TIME" + case VARIABLE: + return "VARIABLE" + case FUNCTION: + return "FUNCTION" + case SEPARATOR: + return "SEPARATOR" + case COMPARATOR: + return "COMPARATOR" + case LOGICALOP: + return "LOGICALOP" + case MODIFIER: + return "MODIFIER" + case CLAUSE: + return "CLAUSE" + case CLAUSE_CLOSE: + return "CLAUSE_CLOSE" + case TERNARY: + return "TERNARY" + case ACCESSOR: + return "ACCESSOR" + } + + return "UNKNOWN" +} diff --git a/vendor/github.com/casbin/govaluate/evaluationStage.go b/vendor/github.com/casbin/govaluate/evaluationStage.go new file mode 100644 index 000000000..965040a7a --- /dev/null +++ b/vendor/github.com/casbin/govaluate/evaluationStage.go @@ -0,0 +1,542 @@ +package govaluate + +import ( + "errors" + "fmt" + "math" + "reflect" + "regexp" + "strings" + "unicode" +) + +const ( + logicalErrorFormat string = "Value '%v' cannot be used with the logical operator '%v', it is not a bool" + modifierErrorFormat string = "Value '%v' cannot be used with the modifier '%v', it is not a number" + comparatorErrorFormat string = "Value '%v' cannot be used with the comparator '%v', it is not a number" + ternaryErrorFormat string = "Value '%v' cannot be used with the ternary operator '%v', it is not a bool" + prefixErrorFormat string = "Value '%v' cannot be used with the prefix '%v'" +) + +type evaluationOperator func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) +type stageTypeCheck func(value interface{}) bool +type stageCombinedTypeCheck func(left interface{}, right interface{}) bool + +type evaluationStage struct { + symbol OperatorSymbol + + leftStage, rightStage *evaluationStage + + // the operation that will be used to evaluate this stage (such as adding [left] to [right] and 
return the result) + operator evaluationOperator + + // ensures that both left and right values are appropriate for this stage. Returns an error if they aren't operable. + leftTypeCheck stageTypeCheck + rightTypeCheck stageTypeCheck + + // if specified, will override whatever is used in "leftTypeCheck" and "rightTypeCheck". + // primarily used for specific operators that don't care which side a given type is on, but still requires one side to be of a given type + // (like string concat) + typeCheck stageCombinedTypeCheck + + // regardless of which type check is used, this string format will be used as the error message for type errors + typeErrorFormat string +} + +var ( + _true = interface{}(true) + _false = interface{}(false) +) + +func (this *evaluationStage) swapWith(other *evaluationStage) { + + temp := *other + other.setToNonStage(*this) + this.setToNonStage(temp) +} + +func (this *evaluationStage) setToNonStage(other evaluationStage) { + + this.symbol = other.symbol + this.operator = other.operator + this.leftTypeCheck = other.leftTypeCheck + this.rightTypeCheck = other.rightTypeCheck + this.typeCheck = other.typeCheck + this.typeErrorFormat = other.typeErrorFormat +} + +func (this *evaluationStage) isShortCircuitable() bool { + + switch this.symbol { + case AND: + fallthrough + case OR: + fallthrough + case TERNARY_TRUE: + fallthrough + case TERNARY_FALSE: + fallthrough + case COALESCE: + return true + } + + return false +} + +func noopStageRight(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return right, nil +} + +func addStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + // string concat if either are strings + if isString(left) || isString(right) { + return fmt.Sprintf("%v%v", left, right), nil + } + + return left.(float64) + right.(float64), nil +} +func subtractStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) 
- right.(float64), nil +} +func multiplyStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) * right.(float64), nil +} +func divideStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return left.(float64) / right.(float64), nil +} +func exponentStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return math.Pow(left.(float64), right.(float64)), nil +} +func modulusStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return math.Mod(left.(float64), right.(float64)), nil +} +func gteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) >= right.(string)), nil + } + return boolIface(left.(float64) >= right.(float64)), nil +} +func gtStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) > right.(string)), nil + } + return boolIface(left.(float64) > right.(float64)), nil +} +func lteStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) <= right.(string)), nil + } + return boolIface(left.(float64) <= right.(float64)), nil +} +func ltStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if isString(left) && isString(right) { + return boolIface(left.(string) < right.(string)), nil + } + return boolIface(left.(float64) < right.(float64)), nil +} +func equalStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(reflect.DeepEqual(left, right)), nil +} +func notEqualStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return 
boolIface(!reflect.DeepEqual(left, right)), nil +} +func andStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(left.(bool) && right.(bool)), nil +} +func orStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(left.(bool) || right.(bool)), nil +} +func negateStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return -right.(float64), nil +} +func invertStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return boolIface(!right.(bool)), nil +} +func bitwiseNotStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(^int64(right.(float64))), nil +} +func ternaryIfStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if left.(bool) { + return right, nil + } + return nil, nil +} +func ternaryElseStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + if left != nil { + return left, nil + } + return right, nil +} + +func regexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + var pattern *regexp.Regexp + var err error + + switch right := right.(type) { + case string: + pattern, err = regexp.Compile(right) + if err != nil { + return nil, fmt.Errorf("Unable to compile regexp pattern '%v': %v", right, err) + } + case *regexp.Regexp: + pattern = right + } + + return pattern.Match([]byte(left.(string))), nil +} + +func notRegexStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + ret, err := regexStage(left, right, parameters) + if err != nil { + return nil, err + } + + return !(ret.(bool)), nil +} + +func bitwiseOrStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) | int64(right.(float64))), nil +} +func 
bitwiseAndStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) & int64(right.(float64))), nil +} +func bitwiseXORStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(int64(left.(float64)) ^ int64(right.(float64))), nil +} +func leftShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(uint64(left.(float64)) << uint64(right.(float64))), nil +} +func rightShiftStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return float64(uint64(left.(float64)) >> uint64(right.(float64))), nil +} + +func makeParameterStage(parameterName string) evaluationOperator { + + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + value, err := parameters.Get(parameterName) + if err != nil { + return nil, err + } + + return value, nil + } +} + +func makeLiteralStage(literal interface{}) evaluationOperator { + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + return literal, nil + } +} + +func makeFunctionStage(function ExpressionFunction) evaluationOperator { + + return func(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + if right == nil { + return function() + } + + switch right := right.(type) { + case []interface{}: + return function(right...) 
+ default: + return function(right) + } + } +} + +func typeConvertParam(p reflect.Value, t reflect.Type) (ret reflect.Value, err error) { + defer func() { + if r := recover(); r != nil { + errorMsg := fmt.Sprintf("Argument type conversion failed: failed to convert '%s' to '%s'", p.Kind().String(), t.Kind().String()) + err = errors.New(errorMsg) + ret = p + } + }() + + return p.Convert(t), nil +} + +func typeConvertParams(method reflect.Value, params []reflect.Value) ([]reflect.Value, error) { + + methodType := method.Type() + numIn := methodType.NumIn() + numParams := len(params) + + if numIn != numParams { + if numIn > numParams { + return nil, fmt.Errorf("Too few arguments to parameter call: got %d arguments, expected %d", len(params), numIn) + } + return nil, fmt.Errorf("Too many arguments to parameter call: got %d arguments, expected %d", len(params), numIn) + } + + for i := 0; i < numIn; i++ { + t := methodType.In(i) + p := params[i] + pt := p.Type() + + if t.Kind() != pt.Kind() { + np, err := typeConvertParam(p, t) + if err != nil { + return nil, err + } + params[i] = np + } + } + + return params, nil +} + +func makeAccessorStage(pair []string) evaluationOperator { + + reconstructed := strings.Join(pair, ".") + + return func(left interface{}, right interface{}, parameters Parameters) (ret interface{}, err error) { + + var params []reflect.Value + + value, err := parameters.Get(pair[0]) + if err != nil { + return nil, err + } + + // while this library generally tries to handle panic-inducing cases on its own, + // accessors are a sticky case which have a lot of possible ways to fail. + // therefore every call to an accessor sets up a defer that tries to recover from panics, converting them to errors. 
+ defer func() { + if r := recover(); r != nil { + errorMsg := fmt.Sprintf("Failed to access '%s': %v", reconstructed, r.(string)) + err = errors.New(errorMsg) + ret = nil + } + }() + + LOOP: + for i := 1; i < len(pair); i++ { + + coreValue := reflect.ValueOf(value) + + var corePtrVal reflect.Value + + // if this is a pointer, resolve it. + if coreValue.Kind() == reflect.Ptr { + corePtrVal = coreValue + coreValue = coreValue.Elem() + } + + var field reflect.Value + var method reflect.Value + + switch coreValue.Kind() { + case reflect.Struct: + // check if field is exported + firstCharacter := getFirstRune(pair[i]) + if unicode.ToUpper(firstCharacter) != firstCharacter { + errorMsg := fmt.Sprintf("Unable to access unexported field '%s' in '%s'", pair[i], pair[i-1]) + return nil, errors.New(errorMsg) + } + + field = coreValue.FieldByName(pair[i]) + if field != (reflect.Value{}) { + value = field.Interface() + continue LOOP + } + + method = coreValue.MethodByName(pair[i]) + if method == (reflect.Value{}) { + if corePtrVal.IsValid() { + method = corePtrVal.MethodByName(pair[i]) + } + } + case reflect.Map: + field = coreValue.MapIndex(reflect.ValueOf(pair[i])) + if field != (reflect.Value{}) { + inter := field.Interface() + if inter != nil && reflect.TypeOf(inter).Kind() == reflect.Func { + method = reflect.ValueOf(inter) + } else { + value = inter + continue LOOP + } + } + default: + return nil, errors.New("Unable to access '" + pair[i] + "', '" + pair[i-1] + "' is not a struct or map") + } + + if method == (reflect.Value{}) { + return nil, errors.New("No method or field '" + pair[i] + "' present on parameter '" + pair[i-1] + "'") + } + + switch right := right.(type) { + case []interface{}: + + givenParams := right + params = make([]reflect.Value, len(givenParams)) + for idx := range givenParams { + params[idx] = reflect.ValueOf(givenParams[idx]) + } + + default: + + if right == nil { + params = []reflect.Value{} + break + } + + params = 
[]reflect.Value{reflect.ValueOf(right)} + } + + params, err = typeConvertParams(method, params) + + if err != nil { + return nil, errors.New("Method call failed - '" + pair[0] + "." + pair[1] + "': " + err.Error()) + } + + returned := method.Call(params) + retLength := len(returned) + + if retLength == 0 { + return nil, errors.New("Method call '" + pair[i-1] + "." + pair[i] + "' did not return any values.") + } + + if retLength == 1 { + + value = returned[0].Interface() + continue + } + + if retLength == 2 { + + errIface := returned[1].Interface() + err, validType := errIface.(error) + + if validType && errIface != nil { + return returned[0].Interface(), err + } + + value = returned[0].Interface() + continue + } + + return nil, errors.New("Method call '" + pair[0] + "." + pair[1] + "' did not return either one value, or a value and an error. Cannot interpret meaning.") + } + + value = castToFloat64(value) + return value, nil + } +} + +func separatorStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + var ret []interface{} + + switch left := left.(type) { + case []interface{}: + ret = append(left, right) + default: + ret = []interface{}{left, right} + } + + return ret, nil +} + +func inStage(left interface{}, right interface{}, parameters Parameters) (interface{}, error) { + + for _, value := range right.([]interface{}) { + value = castToFloat64(value) + if left == value { + return true, nil + } + } + return false, nil +} + +// + +func isString(value interface{}) bool { + + switch value.(type) { + case string: + return true + } + return false +} + +func isRegexOrString(value interface{}) bool { + + switch value.(type) { + case string: + return true + case *regexp.Regexp: + return true + } + return false +} + +func isBool(value interface{}) bool { + switch value.(type) { + case bool: + return true + } + return false +} + +func isFloat64(value interface{}) bool { + switch value.(type) { + case float64: + return true + } + return 
false +} + +/* +Addition usually means between numbers, but can also mean string concat. +String concat needs one (or both) of the sides to be a string. +*/ +func additionTypeCheck(left interface{}, right interface{}) bool { + + if isFloat64(left) && isFloat64(right) { + return true + } + if !isString(left) && !isString(right) { + return false + } + return true +} + +/* +Comparison can either be between numbers, or lexicographic between two strings, +but never between the two. +*/ +func comparatorTypeCheck(left interface{}, right interface{}) bool { + + if isFloat64(left) && isFloat64(right) { + return true + } + if isString(left) && isString(right) { + return true + } + return false +} + +func isArray(value interface{}) bool { + switch value.(type) { + case []interface{}: + return true + } + return false +} + +/* +Converting a boolean to an interface{} requires an allocation. +We can use interned bools to avoid this cost. +*/ +func boolIface(b bool) interface{} { + if b { + return _true + } + return _false +} diff --git a/vendor/github.com/casbin/govaluate/expressionFunctions.go b/vendor/github.com/casbin/govaluate/expressionFunctions.go new file mode 100644 index 000000000..ac6592b3f --- /dev/null +++ b/vendor/github.com/casbin/govaluate/expressionFunctions.go @@ -0,0 +1,8 @@ +package govaluate + +/* + Represents a function that can be called from within an expression. + This method must return an error if, for any reason, it is unable to produce exactly one unambiguous result. + An error returned will halt execution of the expression. 
+*/ +type ExpressionFunction func(arguments ...interface{}) (interface{}, error) diff --git a/vendor/github.com/casbin/govaluate/expressionOutputStream.go b/vendor/github.com/casbin/govaluate/expressionOutputStream.go new file mode 100644 index 000000000..88a841639 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/expressionOutputStream.go @@ -0,0 +1,46 @@ +package govaluate + +import ( + "bytes" +) + +/* + Holds a series of "transactions" which represent each token as it is output by an outputter (such as ToSQLQuery()). + Some outputs (such as SQL) require a function call or non-c-like syntax to represent an expression. + To accomplish this, this struct keeps track of each translated token as it is output, and can return and rollback those transactions. +*/ +type expressionOutputStream struct { + transactions []string +} + +func (this *expressionOutputStream) add(transaction string) { + this.transactions = append(this.transactions, transaction) +} + +func (this *expressionOutputStream) rollback() string { + + index := len(this.transactions) - 1 + ret := this.transactions[index] + + this.transactions = this.transactions[:index] + return ret +} + +func (this *expressionOutputStream) createString(delimiter string) string { + + var retBuffer bytes.Buffer + var transaction string + + penultimate := len(this.transactions) - 1 + + for i := 0; i < penultimate; i++ { + + transaction = this.transactions[i] + + retBuffer.WriteString(transaction) + retBuffer.WriteString(delimiter) + } + retBuffer.WriteString(this.transactions[penultimate]) + + return retBuffer.String() +} diff --git a/vendor/github.com/casbin/govaluate/lexerState.go b/vendor/github.com/casbin/govaluate/lexerState.go new file mode 100644 index 000000000..6726e909e --- /dev/null +++ b/vendor/github.com/casbin/govaluate/lexerState.go @@ -0,0 +1,373 @@ +package govaluate + +import ( + "errors" + "fmt" +) + +type lexerState struct { + isEOF bool + isNullable bool + kind TokenKind + validNextKinds []TokenKind 
+} + +// lexer states. +// Constant for all purposes except compiler. +var validLexerStates = []lexerState{ + + lexerState{ + kind: UNKNOWN, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + PATTERN, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + }, + }, + + lexerState{ + + kind: CLAUSE, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + PATTERN, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + + lexerState{ + + kind: CLAUSE_CLOSE, + isEOF: true, + isNullable: true, + validNextKinds: []TokenKind{ + + COMPARATOR, + MODIFIER, + NUMERIC, + BOOLEAN, + VARIABLE, + STRING, + PATTERN, + TIME, + CLAUSE, + CLAUSE_CLOSE, + LOGICALOP, + TERNARY, + SEPARATOR, + }, + }, + + lexerState{ + + kind: NUMERIC, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: BOOLEAN, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: STRING, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: TIME, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: PATTERN, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: VARIABLE, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: MODIFIER, + isEOF: 
false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + VARIABLE, + FUNCTION, + ACCESSOR, + STRING, + BOOLEAN, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + lexerState{ + + kind: COMPARATOR, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + PATTERN, + }, + }, + lexerState{ + + kind: LOGICALOP, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + VARIABLE, + FUNCTION, + ACCESSOR, + STRING, + TIME, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + lexerState{ + + kind: PREFIX, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + NUMERIC, + BOOLEAN, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + CLAUSE_CLOSE, + }, + }, + + lexerState{ + + kind: TERNARY, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + STRING, + TIME, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + SEPARATOR, + }, + }, + lexerState{ + + kind: FUNCTION, + isEOF: false, + isNullable: false, + validNextKinds: []TokenKind{ + CLAUSE, + }, + }, + lexerState{ + + kind: ACCESSOR, + isEOF: true, + isNullable: false, + validNextKinds: []TokenKind{ + CLAUSE, + MODIFIER, + COMPARATOR, + LOGICALOP, + CLAUSE_CLOSE, + TERNARY, + SEPARATOR, + }, + }, + lexerState{ + + kind: SEPARATOR, + isEOF: false, + isNullable: true, + validNextKinds: []TokenKind{ + + PREFIX, + NUMERIC, + BOOLEAN, + STRING, + TIME, + VARIABLE, + FUNCTION, + ACCESSOR, + CLAUSE, + }, + }, +} + +func (this lexerState) canTransitionTo(kind TokenKind) bool { + + for _, validKind := range this.validNextKinds { + + if validKind == kind { + return true + } + } + + return false +} + +func checkExpressionSyntax(tokens []ExpressionToken) error { + + var state lexerState + var lastToken ExpressionToken + var err error + + state = validLexerStates[0] + + for _, token := range tokens { + + if 
!state.canTransitionTo(token.Kind) { + + // call out a specific error for tokens looking like they want to be functions. + if lastToken.Kind == VARIABLE && token.Kind == CLAUSE { + return errors.New("Undefined function " + lastToken.Value.(string)) + } + + firstStateName := fmt.Sprintf("%s [%v]", state.kind.String(), lastToken.Value) + nextStateName := fmt.Sprintf("%s [%v]", token.Kind.String(), token.Value) + + return errors.New("Cannot transition token types from " + firstStateName + " to " + nextStateName) + } + + state, err = getLexerStateForToken(token.Kind) + if err != nil { + return err + } + + if !state.isNullable && token.Value == nil { + + errorMsg := fmt.Sprintf("Token kind '%v' cannot have a nil value", token.Kind.String()) + return errors.New(errorMsg) + } + + lastToken = token + } + + if !state.isEOF { + return errors.New("Unexpected end of expression") + } + return nil +} + +func getLexerStateForToken(kind TokenKind) (lexerState, error) { + + for _, possibleState := range validLexerStates { + + if possibleState.kind == kind { + return possibleState, nil + } + } + + errorMsg := fmt.Sprintf("No lexer state found for token kind '%v'\n", kind.String()) + return validLexerStates[0], errors.New(errorMsg) +} diff --git a/vendor/github.com/casbin/govaluate/lexerStream.go b/vendor/github.com/casbin/govaluate/lexerStream.go new file mode 100644 index 000000000..c6ed76ec4 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/lexerStream.go @@ -0,0 +1,37 @@ +package govaluate + +type lexerStream struct { + source []rune + position int + length int +} + +func newLexerStream(source string) *lexerStream { + + var ret *lexerStream + var runes []rune + + for _, character := range source { + runes = append(runes, character) + } + + ret = new(lexerStream) + ret.source = runes + ret.length = len(runes) + return ret +} + +func (this *lexerStream) readCharacter() rune { + + character := this.source[this.position] + this.position += 1 + return character +} + +func (this 
*lexerStream) rewind(amount int) { + this.position -= amount +} + +func (this lexerStream) canRead() bool { + return this.position < this.length +} diff --git a/vendor/github.com/casbin/govaluate/parameters.go b/vendor/github.com/casbin/govaluate/parameters.go new file mode 100644 index 000000000..6c5b9ecb5 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/parameters.go @@ -0,0 +1,32 @@ +package govaluate + +import ( + "errors" +) + +/* + Parameters is a collection of named parameters that can be used by an EvaluableExpression to retrieve parameters + when an expression tries to use them. +*/ +type Parameters interface { + + /* + Get gets the parameter of the given name, or an error if the parameter is unavailable. + Failure to find the given parameter should be indicated by returning an error. + */ + Get(name string) (interface{}, error) +} + +type MapParameters map[string]interface{} + +func (p MapParameters) Get(name string) (interface{}, error) { + + value, found := p[name] + + if !found { + errorMessage := "No parameter '" + name + "' found." 
+ return nil, errors.New(errorMessage) + } + + return value, nil +} diff --git a/vendor/github.com/casbin/govaluate/parsing.go b/vendor/github.com/casbin/govaluate/parsing.go new file mode 100644 index 000000000..dae78f7d2 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/parsing.go @@ -0,0 +1,509 @@ +package govaluate + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "time" + "unicode" +) + +func parseTokens(expression string, functions map[string]ExpressionFunction) ([]ExpressionToken, error) { + + var ret []ExpressionToken + var token ExpressionToken + var stream *lexerStream + var state lexerState + var err error + var found bool + + stream = newLexerStream(expression) + state = validLexerStates[0] + + for stream.canRead() { + + token, err, found = readToken(stream, state, functions) + + if err != nil { + return ret, err + } + + if !found { + break + } + + state, err = getLexerStateForToken(token.Kind) + if err != nil { + return ret, err + } + + // append this valid token + ret = append(ret, token) + } + + err = checkBalance(ret) + if err != nil { + return nil, err + } + + return ret, nil +} + +func readToken(stream *lexerStream, state lexerState, functions map[string]ExpressionFunction) (ExpressionToken, error, bool) { + + var function ExpressionFunction + var ret ExpressionToken + var tokenValue interface{} + var tokenTime time.Time + var tokenString string + var kind TokenKind + var character rune + var found bool + var completed bool + var err error + + // numeric is 0-9, or . 
or 0x followed by digits + // string starts with ' + // variable is alphanumeric, always starts with a letter + // bracket always means variable + // symbols are anything non-alphanumeric + // all others read into a buffer until they reach the end of the stream + for stream.canRead() { + + character = stream.readCharacter() + + if unicode.IsSpace(character) { + continue + } + + // numeric constant + if isNumeric(character) { + + if stream.canRead() && character == '0' { + character = stream.readCharacter() + + if stream.canRead() && character == 'x' { + tokenString, _ = readUntilFalse(stream, false, true, true, isHexDigit) + tokenValueInt, err := strconv.ParseUint(tokenString, 16, 64) + + if err != nil { + errorMsg := fmt.Sprintf("Unable to parse hex value '%v' to uint64\n", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + + kind = NUMERIC + tokenValue = float64(tokenValueInt) + break + } else { + stream.rewind(1) + } + } + + tokenString = readTokenUntilFalse(stream, isNumeric) + tokenValue, err = strconv.ParseFloat(tokenString, 64) + + if err != nil { + errorMsg := fmt.Sprintf("Unable to parse numeric value '%v' to float64\n", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + kind = NUMERIC + break + } + + // comma, separator + if character == ',' { + + tokenValue = "," + kind = SEPARATOR + break + } + + // escaped variable + if character == '[' { + + tokenValue, completed = readUntilFalse(stream, true, false, true, isNotClosingBracket) + kind = VARIABLE + + if !completed { + return ExpressionToken{}, errors.New("Unclosed parameter bracket"), false + } + + // above method normally rewinds us to the closing bracket, which we want to skip. + stream.rewind(-1) + break + } + + // regular variable - or function? + if unicode.IsLetter(character) { + + tokenString = readTokenUntilFalse(stream, isVariableName) + + tokenValue = tokenString + kind = VARIABLE + + // boolean? 
+ if tokenValue == "true" { + + kind = BOOLEAN + tokenValue = true + } else { + + if tokenValue == "false" { + + kind = BOOLEAN + tokenValue = false + } + } + + // textual operator? + if tokenValue == "in" || tokenValue == "IN" { + + // force lower case for consistency + tokenValue = "in" + kind = COMPARATOR + } + + // function? + function, found = functions[tokenString] + if found { + kind = FUNCTION + tokenValue = function + } + + // accessor? + accessorIndex := strings.Index(tokenString, ".") + if accessorIndex > 0 { + + // check that it doesn't end with a hanging period + if tokenString[len(tokenString)-1] == '.' { + errorMsg := fmt.Sprintf("Hanging accessor on token '%s'", tokenString) + return ExpressionToken{}, errors.New(errorMsg), false + } + + kind = ACCESSOR + splits := strings.Split(tokenString, ".") + tokenValue = splits + } + break + } + + if !isNotQuote(character) { + tokenValue, completed = readUntilFalse(stream, true, false, true, isNotQuote) + + if !completed { + return ExpressionToken{}, errors.New("Unclosed string literal"), false + } + + // advance the stream one position, since reading until false assumes the terminator is a real token + stream.rewind(-1) + + // check to see if this can be parsed as a time. + tokenTime, found = tryParseTime(tokenValue.(string)) + if found { + kind = TIME + tokenValue = tokenTime + } else { + kind = STRING + } + break + } + + if character == '(' { + tokenValue = character + kind = CLAUSE + break + } + + if character == ')' { + tokenValue = character + kind = CLAUSE_CLOSE + break + } + + // must be a known symbol + tokenString = readTokenUntilFalse(stream, isNotAlphanumeric) + tokenValue = tokenString + + // quick hack for the case where "-" can mean "prefixed negation" or "minus", which are used + // very differently. 
+ if state.canTransitionTo(PREFIX) { + _, found = prefixSymbols[tokenString] + if found { + + kind = PREFIX + break + } + } + _, found = modifierSymbols[tokenString] + if found { + + kind = MODIFIER + break + } + + _, found = logicalSymbols[tokenString] + if found { + + kind = LOGICALOP + break + } + + _, found = comparatorSymbols[tokenString] + if found { + + kind = COMPARATOR + break + } + + _, found = ternarySymbols[tokenString] + if found { + + kind = TERNARY + break + } + + errorMessage := fmt.Sprintf("Invalid token: '%s'", tokenString) + return ret, errors.New(errorMessage), false + } + + ret.Kind = kind + ret.Value = tokenValue + + return ret, nil, (kind != UNKNOWN) +} + +func readTokenUntilFalse(stream *lexerStream, condition func(rune) bool) string { + + var ret string + + stream.rewind(1) + ret, _ = readUntilFalse(stream, false, true, true, condition) + return ret +} + +/* +Returns the string that was read until the given [condition] was false, or whitespace was broken. +Returns false if the stream ended before whitespace was broken or condition was met. 
+*/ +func readUntilFalse(stream *lexerStream, includeWhitespace bool, breakWhitespace bool, allowEscaping bool, condition func(rune) bool) (string, bool) { + + var tokenBuffer bytes.Buffer + var character rune + var conditioned bool + + conditioned = false + + for stream.canRead() { + + character = stream.readCharacter() + + // Use backslashes to escape anything + if allowEscaping && character == '\\' { + + character = stream.readCharacter() + tokenBuffer.WriteString(string(character)) + continue + } + + if unicode.IsSpace(character) { + + if breakWhitespace && tokenBuffer.Len() > 0 { + conditioned = true + break + } + if !includeWhitespace { + continue + } + } + + if condition(character) { + tokenBuffer.WriteString(string(character)) + } else { + conditioned = true + stream.rewind(1) + break + } + } + + return tokenBuffer.String(), conditioned +} + +/* +Checks to see if any optimizations can be performed on the given [tokens], which form a complete, valid expression. +The returns slice will represent the optimized (or unmodified) list of tokens to use. +*/ +func optimizeTokens(tokens []ExpressionToken) ([]ExpressionToken, error) { + + var token ExpressionToken + var symbol OperatorSymbol + var err error + var index int + + for index, token = range tokens { + + // if we find a regex operator, and the right-hand value is a constant, precompile and replace with a pattern. + if token.Kind != COMPARATOR { + continue + } + + symbol = comparatorSymbols[token.Value.(string)] + if symbol != REQ && symbol != NREQ { + continue + } + + index++ + token = tokens[index] + if token.Kind == STRING { + + token.Kind = PATTERN + token.Value, err = regexp.Compile(token.Value.(string)) + + if err != nil { + return tokens, err + } + + tokens[index] = token + } + } + return tokens, nil +} + +/* +Checks the balance of tokens which have multiple parts, such as parenthesis. 
+*/ +func checkBalance(tokens []ExpressionToken) error { + + var stream *tokenStream + var token ExpressionToken + var parens int + + stream = newTokenStream(tokens) + + for stream.hasNext() { + + token = stream.next() + if token.Kind == CLAUSE { + parens++ + continue + } + if token.Kind == CLAUSE_CLOSE { + parens-- + continue + } + } + + if parens != 0 { + return errors.New("Unbalanced parenthesis") + } + return nil +} + +func isHexDigit(character rune) bool { + + character = unicode.ToLower(character) + + return unicode.IsDigit(character) || + character == 'a' || + character == 'b' || + character == 'c' || + character == 'd' || + character == 'e' || + character == 'f' +} + +func isNumeric(character rune) bool { + + return unicode.IsDigit(character) || character == '.' +} + +func isNotQuote(character rune) bool { + + return character != '\'' && character != '"' +} + +func isNotAlphanumeric(character rune) bool { + + return !(unicode.IsDigit(character) || + unicode.IsLetter(character) || + character == '(' || + character == ')' || + character == '[' || + character == ']' || // starting to feel like there needs to be an `isOperation` func (#59) + !isNotQuote(character)) +} + +func isVariableName(character rune) bool { + + return unicode.IsLetter(character) || + unicode.IsDigit(character) || + character == '_' || + character == '.' +} + +func isNotClosingBracket(character rune) bool { + + return character != ']' +} + +/* +Attempts to parse the [candidate] as a Time. +Tries a series of standardized date formats, returns the Time if one applies, +otherwise returns false through the second return. 
+*/ +func tryParseTime(candidate string) (time.Time, bool) { + + var ret time.Time + var found bool + + timeFormats := [...]string{ + time.ANSIC, + time.UnixDate, + time.RubyDate, + time.Kitchen, + time.RFC3339, + time.RFC3339Nano, + "2006-01-02", // RFC 3339 + "2006-01-02 15:04", // RFC 3339 with minutes + "2006-01-02 15:04:05", // RFC 3339 with seconds + "2006-01-02 15:04:05-07:00", // RFC 3339 with seconds and timezone + "2006-01-02T15Z0700", // ISO8601 with hour + "2006-01-02T15:04Z0700", // ISO8601 with minutes + "2006-01-02T15:04:05Z0700", // ISO8601 with seconds + "2006-01-02T15:04:05.999999999Z0700", // ISO8601 with nanoseconds + } + + for _, format := range timeFormats { + + ret, found = tryParseExactTime(candidate, format) + if found { + return ret, true + } + } + + return time.Now(), false +} + +func tryParseExactTime(candidate string, format string) (time.Time, bool) { + + var ret time.Time + var err error + + ret, err = time.ParseInLocation(format, candidate, time.Local) + if err != nil { + return time.Now(), false + } + + return ret, true +} + +func getFirstRune(candidate string) rune { + + for _, character := range candidate { + return character + } + + return 0 +} diff --git a/vendor/github.com/casbin/govaluate/sanitizedParameters.go b/vendor/github.com/casbin/govaluate/sanitizedParameters.go new file mode 100644 index 000000000..b254bff6a --- /dev/null +++ b/vendor/github.com/casbin/govaluate/sanitizedParameters.go @@ -0,0 +1,43 @@ +package govaluate + +// sanitizedParameters is a wrapper for Parameters that does sanitization as +// parameters are accessed. 
+type sanitizedParameters struct { + orig Parameters +} + +func (p sanitizedParameters) Get(key string) (interface{}, error) { + value, err := p.orig.Get(key) + if err != nil { + return nil, err + } + + return castToFloat64(value), nil +} + +func castToFloat64(value interface{}) interface{} { + switch value := value.(type) { + case uint8: + return float64(value) + case uint16: + return float64(value) + case uint32: + return float64(value) + case uint64: + return float64(value) + case int8: + return float64(value) + case int16: + return float64(value) + case int32: + return float64(value) + case int64: + return float64(value) + case int: + return float64(value) + case float32: + return float64(value) + } + + return value +} diff --git a/vendor/github.com/casbin/govaluate/stagePlanner.go b/vendor/github.com/casbin/govaluate/stagePlanner.go new file mode 100644 index 000000000..400a2879b --- /dev/null +++ b/vendor/github.com/casbin/govaluate/stagePlanner.go @@ -0,0 +1,728 @@ +package govaluate + +import ( + "errors" + "fmt" + "time" +) + +var stageSymbolMap = map[OperatorSymbol]evaluationOperator{ + EQ: equalStage, + NEQ: notEqualStage, + GT: gtStage, + LT: ltStage, + GTE: gteStage, + LTE: lteStage, + REQ: regexStage, + NREQ: notRegexStage, + AND: andStage, + OR: orStage, + IN: inStage, + BITWISE_OR: bitwiseOrStage, + BITWISE_AND: bitwiseAndStage, + BITWISE_XOR: bitwiseXORStage, + BITWISE_LSHIFT: leftShiftStage, + BITWISE_RSHIFT: rightShiftStage, + PLUS: addStage, + MINUS: subtractStage, + MULTIPLY: multiplyStage, + DIVIDE: divideStage, + MODULUS: modulusStage, + EXPONENT: exponentStage, + NEGATE: negateStage, + INVERT: invertStage, + BITWISE_NOT: bitwiseNotStage, + TERNARY_TRUE: ternaryIfStage, + TERNARY_FALSE: ternaryElseStage, + COALESCE: ternaryElseStage, + SEPARATE: separatorStage, +} + +/* +A "precedent" is a function which will recursively parse new evaluateionStages from a given stream of tokens. 
+It's called a `precedent` because it is expected to handle exactly what precedence of operator, +and defer to other `precedent`s for other operators. +*/ +type precedent func(stream *tokenStream) (*evaluationStage, error) + +/* +A convenience function for specifying the behavior of a `precedent`. +Most `precedent` functions can be described by the same function, just with different type checks, symbols, and error formats. +This struct is passed to `makePrecedentFromPlanner` to create a `precedent` function. +*/ +type precedencePlanner struct { + validSymbols map[string]OperatorSymbol + validKinds []TokenKind + + typeErrorFormat string + + next precedent + nextRight precedent +} + +var planPrefix precedent +var planExponential precedent +var planMultiplicative precedent +var planAdditive precedent +var planBitwise precedent +var planShift precedent +var planComparator precedent +var planLogicalAnd precedent +var planLogicalOr precedent +var planTernary precedent +var planSeparator precedent + +func init() { + + // all these stages can use the same code (in `planPrecedenceLevel`) to execute, + // they simply need different type checks, symbols, and recursive precedents. + // While not all precedent phases are listed here, most can be represented this way. 
+ planPrefix = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: prefixSymbols, + validKinds: []TokenKind{PREFIX}, + typeErrorFormat: prefixErrorFormat, + nextRight: planFunction, + }) + planExponential = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: exponentialSymbolsS, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planFunction, + }) + planMultiplicative = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: multiplicativeSymbols, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planExponential, + }) + planAdditive = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: additiveSymbols, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planMultiplicative, + }) + planShift = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: bitwiseShiftSymbols, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planAdditive, + }) + planBitwise = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: bitwiseSymbols, + validKinds: []TokenKind{MODIFIER}, + typeErrorFormat: modifierErrorFormat, + next: planShift, + }) + planComparator = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: comparatorSymbols, + validKinds: []TokenKind{COMPARATOR}, + typeErrorFormat: comparatorErrorFormat, + next: planBitwise, + }) + planLogicalAnd = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: map[string]OperatorSymbol{"&&": AND}, + validKinds: []TokenKind{LOGICALOP}, + typeErrorFormat: logicalErrorFormat, + next: planComparator, + }) + planLogicalOr = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: map[string]OperatorSymbol{"||": OR}, + validKinds: []TokenKind{LOGICALOP}, + typeErrorFormat: logicalErrorFormat, + next: planLogicalAnd, + }) + planTernary = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: ternarySymbols, + validKinds: []TokenKind{TERNARY}, + 
typeErrorFormat: ternaryErrorFormat, + next: planLogicalOr, + }) + planSeparator = makePrecedentFromPlanner(&precedencePlanner{ + validSymbols: separatorSymbols, + validKinds: []TokenKind{SEPARATOR}, + next: planTernary, + }) +} + +/* +Given a planner, creates a function which will evaluate a specific precedence level of operators, +and link it to other `precedent`s which recurse to parse other precedence levels. +*/ +func makePrecedentFromPlanner(planner *precedencePlanner) precedent { + + var generated precedent + var nextRight precedent + + generated = func(stream *tokenStream) (*evaluationStage, error) { + return planPrecedenceLevel( + stream, + planner.typeErrorFormat, + planner.validSymbols, + planner.validKinds, + nextRight, + planner.next, + ) + } + + if planner.nextRight != nil { + nextRight = planner.nextRight + } else { + nextRight = generated + } + + return generated +} + +/* +Creates a `evaluationStageList` object which represents an execution plan (or tree) +which is used to completely evaluate a set of tokens at evaluation-time. +The three stages of evaluation can be thought of as parsing strings to tokens, then tokens to a stage list, then evaluation with parameters. +*/ +func planStages(tokens []ExpressionToken) (*evaluationStage, error) { + + stream := newTokenStream(tokens) + + stage, err := planTokens(stream) + if err != nil { + return nil, err + } + + // while we're now fully-planned, we now need to re-order same-precedence operators. + // this could probably be avoided with a different planning method + reorderStages(stage) + + stage = elideLiterals(stage) + return stage, nil +} + +func planTokens(stream *tokenStream) (*evaluationStage, error) { + + if !stream.hasNext() { + return nil, nil + } + + return planSeparator(stream) +} + +/* +The most usual method of parsing an evaluation stage for a given precedence. 
+Most stages use the same logic +*/ +func planPrecedenceLevel( + stream *tokenStream, + typeErrorFormat string, + validSymbols map[string]OperatorSymbol, + validKinds []TokenKind, + rightPrecedent precedent, + leftPrecedent precedent) (*evaluationStage, error) { + + var token ExpressionToken + var symbol OperatorSymbol + var leftStage, rightStage *evaluationStage + var checks typeChecks + var err error + var keyFound bool + + if leftPrecedent != nil { + + leftStage, err = leftPrecedent(stream) + if err != nil { + return nil, err + } + } + + rewind := func() (*evaluationStage, error) { + stream.rewind() + return leftStage, nil + } + + if stream.hasNext() { + + token = stream.next() + + if len(validKinds) > 0 { + + keyFound = false + for _, kind := range validKinds { + if kind == token.Kind { + keyFound = true + break + } + } + + if !keyFound { + return rewind() + } + } + + if validSymbols != nil { + + if !isString(token.Value) { + return rewind() + } + + symbol, keyFound = validSymbols[token.Value.(string)] + if !keyFound { + return rewind() + } + } + + if rightPrecedent != nil { + rightStage, err = rightPrecedent(stream) + if err != nil { + return nil, err + } + } + + checks = findTypeChecks(symbol) + + return &evaluationStage{ + + symbol: symbol, + leftStage: leftStage, + rightStage: rightStage, + operator: stageSymbolMap[symbol], + + leftTypeCheck: checks.left, + rightTypeCheck: checks.right, + typeCheck: checks.combined, + typeErrorFormat: typeErrorFormat, + }, nil + } + + return rewind() +} + +/* +A special case where functions need to be of higher precedence than values, and need a special wrapped execution stage operator. 
+*/ +func planFunction(stream *tokenStream) (*evaluationStage, error) { + + var token ExpressionToken + var rightStage *evaluationStage + var err error + + token = stream.next() + + if token.Kind != FUNCTION { + stream.rewind() + return planAccessor(stream) + } + + rightStage, err = planAccessor(stream) + if err != nil { + return nil, err + } + + return &evaluationStage{ + + symbol: FUNCTIONAL, + rightStage: rightStage, + operator: makeFunctionStage(token.Value.(ExpressionFunction)), + typeErrorFormat: "Unable to run function '%v': %v", + }, nil +} + +func planAccessor(stream *tokenStream) (*evaluationStage, error) { + + var token, otherToken ExpressionToken + var rightStage *evaluationStage + var err error + + if !stream.hasNext() { + return nil, nil + } + + token = stream.next() + + if token.Kind != ACCESSOR { + stream.rewind() + return planValue(stream) + } + + // check if this is meant to be a function or a field. + // fields have a clause next to them, functions do not. + // if it's a function, parse the arguments. Otherwise leave the right stage null. + if stream.hasNext() { + + otherToken = stream.next() + if otherToken.Kind == CLAUSE { + + stream.rewind() + + rightStage, err = planTokens(stream) + if err != nil { + return nil, err + } + } else { + stream.rewind() + } + } + + return &evaluationStage{ + + symbol: ACCESS, + rightStage: rightStage, + operator: makeAccessorStage(token.Value.([]string)), + typeErrorFormat: "Unable to access parameter field or method '%v': %v", + }, nil +} + +/* +A truly special precedence function, this handles all the "lowest-case" errata of the process, including literals, parmeters, +clauses, and prefixes. 
+*/ +func planValue(stream *tokenStream) (*evaluationStage, error) { + + var token ExpressionToken + var symbol OperatorSymbol + var ret *evaluationStage + var operator evaluationOperator + var err error + + if !stream.hasNext() { + return nil, nil + } + + token = stream.next() + + switch token.Kind { + + case CLAUSE: + + ret, err = planTokens(stream) + if err != nil { + return nil, err + } + + // advance past the CLAUSE_CLOSE token. We know that it's a CLAUSE_CLOSE, because at parse-time we check for unbalanced parens. + stream.next() + + // the stage we got represents all of the logic contained within the parens + // but for technical reasons, we need to wrap this stage in a "noop" stage which breaks long chains of precedence. + // see github #33. + ret = &evaluationStage{ + rightStage: ret, + operator: noopStageRight, + symbol: NOOP, + } + + return ret, nil + + case CLAUSE_CLOSE: + + // when functions have empty params, this will be hit. In this case, we don't have any evaluation stage to do, + // so we just return nil so that the stage planner continues on its way. + stream.rewind() + return nil, nil + + case VARIABLE: + operator = makeParameterStage(token.Value.(string)) + + case NUMERIC: + fallthrough + case STRING: + fallthrough + case PATTERN: + fallthrough + case BOOLEAN: + symbol = LITERAL + operator = makeLiteralStage(token.Value) + case TIME: + symbol = LITERAL + operator = makeLiteralStage(float64(token.Value.(time.Time).Unix())) + + case PREFIX: + stream.rewind() + return planPrefix(stream) + } + + if operator == nil { + errorMsg := fmt.Sprintf("Unable to plan token kind: '%s', value: '%v'", token.Kind.String(), token.Value) + return nil, errors.New(errorMsg) + } + + return &evaluationStage{ + symbol: symbol, + operator: operator, + }, nil +} + +/* +Convenience function to pass a triplet of typechecks between `findTypeChecks` and `planPrecedenceLevel`. +Each of these members may be nil, which indicates that type does not matter for that value. 
+*/ +type typeChecks struct { + left stageTypeCheck + right stageTypeCheck + combined stageCombinedTypeCheck +} + +/* +Maps a given [symbol] to a set of typechecks to be used during runtime. +*/ +func findTypeChecks(symbol OperatorSymbol) typeChecks { + + switch symbol { + case GT: + fallthrough + case LT: + fallthrough + case GTE: + fallthrough + case LTE: + return typeChecks{ + combined: comparatorTypeCheck, + } + case REQ: + fallthrough + case NREQ: + return typeChecks{ + left: isString, + right: isRegexOrString, + } + case AND: + fallthrough + case OR: + return typeChecks{ + left: isBool, + right: isBool, + } + case IN: + return typeChecks{ + right: isArray, + } + case BITWISE_LSHIFT: + fallthrough + case BITWISE_RSHIFT: + fallthrough + case BITWISE_OR: + fallthrough + case BITWISE_AND: + fallthrough + case BITWISE_XOR: + return typeChecks{ + left: isFloat64, + right: isFloat64, + } + case PLUS: + return typeChecks{ + combined: additionTypeCheck, + } + case MINUS: + fallthrough + case MULTIPLY: + fallthrough + case DIVIDE: + fallthrough + case MODULUS: + fallthrough + case EXPONENT: + return typeChecks{ + left: isFloat64, + right: isFloat64, + } + case NEGATE: + return typeChecks{ + right: isFloat64, + } + case INVERT: + return typeChecks{ + right: isBool, + } + case BITWISE_NOT: + return typeChecks{ + right: isFloat64, + } + case TERNARY_TRUE: + return typeChecks{ + left: isBool, + } + + // unchecked cases + case EQ: + fallthrough + case NEQ: + return typeChecks{} + case TERNARY_FALSE: + fallthrough + case COALESCE: + fallthrough + default: + return typeChecks{} + } +} + +/* +During stage planning, stages of equal precedence are parsed such that they'll be evaluated in reverse order. +For commutative operators like "+" or "-", it's no big deal. But for order-specific operators, it ruins the expected result. +*/ +func reorderStages(rootStage *evaluationStage) { + + // traverse every rightStage until we find multiples in a row of the same precedence. 
+ var identicalPrecedences []*evaluationStage + var currentStage, nextStage *evaluationStage + var precedence, currentPrecedence operatorPrecedence + + nextStage = rootStage + precedence = findOperatorPrecedenceForSymbol(rootStage.symbol) + + for nextStage != nil { + + currentStage = nextStage + nextStage = currentStage.rightStage + + // left depth first, since this entire method only looks for precedences down the right side of the tree + if currentStage.leftStage != nil { + reorderStages(currentStage.leftStage) + } + + currentPrecedence = findOperatorPrecedenceForSymbol(currentStage.symbol) + + if currentPrecedence == precedence { + identicalPrecedences = append(identicalPrecedences, currentStage) + continue + } + + // precedence break. + // See how many in a row we had, and reorder if there's more than one. + if len(identicalPrecedences) > 1 { + mirrorStageSubtree(identicalPrecedences) + } + + identicalPrecedences = []*evaluationStage{currentStage} + precedence = currentPrecedence + } + + if len(identicalPrecedences) > 1 { + mirrorStageSubtree(identicalPrecedences) + } +} + +/* +Performs a "mirror" on a subtree of stages. +This mirror functionally inverts the order of execution for all members of the [stages] list. +That list is assumed to be a root-to-leaf (ordered) list of evaluation stages, where each is a right-hand stage of the last. 
+*/ +func mirrorStageSubtree(stages []*evaluationStage) { + + var rootStage, inverseStage, carryStage, frontStage *evaluationStage + + stagesLength := len(stages) + + // reverse all right/left + for _, frontStage = range stages { + + carryStage = frontStage.rightStage + frontStage.rightStage = frontStage.leftStage + frontStage.leftStage = carryStage + } + + // end left swaps with root right + rootStage = stages[0] + frontStage = stages[stagesLength-1] + + carryStage = frontStage.leftStage + frontStage.leftStage = rootStage.rightStage + rootStage.rightStage = carryStage + + // for all non-root non-end stages, right is swapped with inverse stage right in list + for i := 0; i < (stagesLength-2)/2+1; i++ { + + frontStage = stages[i+1] + inverseStage = stages[stagesLength-i-1] + + carryStage = frontStage.rightStage + frontStage.rightStage = inverseStage.rightStage + inverseStage.rightStage = carryStage + } + + // swap all other information with inverse stages + for i := 0; i < stagesLength/2; i++ { + + frontStage = stages[i] + inverseStage = stages[stagesLength-i-1] + frontStage.swapWith(inverseStage) + } +} + +/* +Recurses through all operators in the entire tree, eliding operators where both sides are literals. +*/ +func elideLiterals(root *evaluationStage) *evaluationStage { + + if root.leftStage != nil { + root.leftStage = elideLiterals(root.leftStage) + } + + if root.rightStage != nil { + root.rightStage = elideLiterals(root.rightStage) + } + + return elideStage(root) +} + +/* +Elides a specific stage, if possible. +Returns the unmodified [root] stage if it cannot or should not be elided. +Otherwise, returns a new stage representing the condensed value from the elided stages. +*/ +func elideStage(root *evaluationStage) *evaluationStage { + + var leftValue, rightValue, result interface{} + var err error + + // right side must be a non-nil value. Left side must be nil or a value. 
+ if root.rightStage == nil || + root.rightStage.symbol != LITERAL || + root.leftStage == nil || + root.leftStage.symbol != LITERAL { + return root + } + + // don't elide some operators + switch root.symbol { + case SEPARATE: + fallthrough + case IN: + return root + } + + // both sides are values, get their actual values. + // errors should be near-impossible here. If we encounter them, just abort this optimization. + leftValue, err = root.leftStage.operator(nil, nil, nil) + if err != nil { + return root + } + + rightValue, err = root.rightStage.operator(nil, nil, nil) + if err != nil { + return root + } + + // typcheck, since the grammar checker is a bit loose with which operator symbols go together. + err = typeCheck(root.leftTypeCheck, leftValue, root.symbol, root.typeErrorFormat) + if err != nil { + return root + } + + err = typeCheck(root.rightTypeCheck, rightValue, root.symbol, root.typeErrorFormat) + if err != nil { + return root + } + + if root.typeCheck != nil && !root.typeCheck(leftValue, rightValue) { + return root + } + + // pre-calculate, and return a new stage representing the result. + result, err = root.operator(leftValue, rightValue, nil) + if err != nil { + return root + } + + return &evaluationStage{ + symbol: LITERAL, + operator: makeLiteralStage(result), + } +} diff --git a/vendor/github.com/casbin/govaluate/test.sh b/vendor/github.com/casbin/govaluate/test.sh new file mode 100644 index 000000000..11aa8b332 --- /dev/null +++ b/vendor/github.com/casbin/govaluate/test.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Script that runs tests, code coverage, and benchmarks all at once. +# Builds a symlink in /tmp, mostly to avoid messing with GOPATH at the user's shell level. 
+ +TEMPORARY_PATH="/tmp/govaluate_test" +SRC_PATH="${TEMPORARY_PATH}/src" +FULL_PATH="${TEMPORARY_PATH}/src/govaluate" + +# set up temporary directory +rm -rf "${FULL_PATH}" +mkdir -p "${SRC_PATH}" + +ln -s $(pwd) "${FULL_PATH}" +export GOPATH="${TEMPORARY_PATH}" + +pushd "${TEMPORARY_PATH}/src/govaluate" + +# run the actual tests. +export GOVALUATE_TORTURE_TEST="true" +go test -bench=. -benchmem #-coverprofile coverage.out +status=$? + +if [ "${status}" != 0 ]; +then + exit $status +fi + +# coverage +# disabled because travis go1.4 seems not to support it suddenly? +#go tool cover -func=coverage.out + +popd diff --git a/vendor/github.com/casbin/govaluate/tokenStream.go b/vendor/github.com/casbin/govaluate/tokenStream.go new file mode 100644 index 000000000..7c7c40abd --- /dev/null +++ b/vendor/github.com/casbin/govaluate/tokenStream.go @@ -0,0 +1,30 @@ +package govaluate + +type tokenStream struct { + tokens []ExpressionToken + index int + tokenLength int +} + +func newTokenStream(tokens []ExpressionToken) *tokenStream { + ret := new(tokenStream) + ret.tokens = tokens + ret.tokenLength = len(tokens) + return ret +} + +func (this *tokenStream) rewind() { + this.index -= 1 +} + +func (this *tokenStream) next() ExpressionToken { + token := this.tokens[this.index] + + this.index += 1 + return token +} + +func (this tokenStream) hasNext() bool { + + return this.index < this.tokenLength +} diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b..f4e7dbf37 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . 
- - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be..000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b..daea9dd6d 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e575754..fa854785d 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. 
+1.8.0 2023-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d..e4ac2a2ff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. 
Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster. +Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". 
+ + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd..c349c326c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. 
You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. 
A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. 
due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. 
When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. 
If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. 
the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. 
err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e4..36c311694 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? 
+ } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. + if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. 
@@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). 
-func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). 
+ if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. 
-// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. - return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. 
+ if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. - if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. 
+ if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. 
+} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a..d8de5ab76 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. 
Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if 
err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. 
if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. -func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. 
- if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. 
for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received kevents into // Event values that it sends down the Events channel. 
-func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... 
+ // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. 
+ err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. 
kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. +func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. 
+ path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. 
+ } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c01..5eb5dbc66 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. 
- // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). 
-// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d6..c54a63083 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. 
-// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. 
-func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. 
The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { 
} // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. -func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. 
-func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, 
watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc49..0760efe91 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ 
b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. 
+ // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. + Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. 
Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. 
+ ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. +// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. 
+// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. 
+// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. 
func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. 
+func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." { return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 000000000..b0eab1009 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 
000000000..928319fb0 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 000000000..3186b0c34 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git 
a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 000000000..f69fdb930 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 000000000..607e683bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + 
+import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 000000000..35c734be4 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + 
time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 000000000..e5b3b6f69 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 000000000..1dd455bc5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? 
+ {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 000000000..f1b2e73bd --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 000000000..52bf4ce53 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 000000000..547df1df8 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return 
unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 000000000..7daa45e19 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 000000000..30976ce97 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 000000000..37dfeddc2 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 
000000000..a72c64954 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. +var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae65..000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. 
- -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b88..f65e8fe3e 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78..a29fc7aab 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go index c0a6f6927..0fc510a0a 100644 --- a/vendor/github.com/golang-jwt/jwt/v4/parser.go +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -7,6 +7,8 @@ import ( "strings" ) +const tokenDelimiter = "." + type Parser struct { // If populated, only these methods will be considered valid. // @@ -36,19 +38,21 @@ func NewParser(options ...ParserOption) *Parser { return p } -// Parse parses, validates, verifies the signature and returns the parsed token. -// keyFunc will receive the parsed token and should return the key for validating. 
+// Parse parses, validates, verifies the signature and returns the parsed token. keyFunc will +// receive the parsed token and should return the key for validating. func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) } -// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims -// interface. This provides default values which can be overridden and allows a caller to use their own type, rather -// than the default MapClaims implementation of Claims. +// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object +// implementing the Claims interface. This provides default values which can be overridden and +// allows a caller to use their own type, rather than the default MapClaims implementation of +// Claims. // -// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims), -// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the -// proper memory for it before passing in the overall claims, otherwise you might run into a panic. +// Note: If you provide a custom claim implementation that embeds one of the standard claims (such +// as RegisteredClaims), make sure that a) you either embed a non-pointer version of the claims or +// b) if you are using a pointer, allocate the proper memory for it before passing in the overall +// claims, otherwise you might run into a panic. 
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { @@ -85,12 +89,17 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} } + // Perform validation + token.Signature = parts[2] + if err := token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + return token, &ValidationError{Inner: err, Errors: ValidationErrorSignatureInvalid} + } + vErr := &ValidationError{} // Validate Claims if !p.SkipClaimsValidation { if err := token.Claims.Valid(); err != nil { - // If the Claims Valid returned an error, check if it is a validation error, // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set if e, ok := err.(*ValidationError); !ok { @@ -98,22 +107,14 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } else { vErr = e } + return token, vErr } } - // Perform validation - token.Signature = parts[2] - if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { - vErr.Inner = err - vErr.Errors |= ValidationErrorSignatureInvalid - } - - if vErr.valid() { - token.Valid = true - return token, nil - } + // No errors so far, token is valid. + token.Valid = true - return token, vErr + return token, nil } // ParseUnverified parses the token but doesn't validate the signature. @@ -123,9 +124,10 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf // It's only ever useful in cases where you know the signature is valid (because it has // been checked previously in the stack) and you want to extract values from it. 
func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { - parts = strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + var ok bool + parts, ok = splitToken(tokenString) + if !ok { + return nil, nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) } token = &Token{Raw: tokenString} @@ -175,3 +177,30 @@ func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Toke return token, parts, nil } + +// splitToken splits a token string into three parts: header, claims, and signature. It will only +// return true if the token contains exactly two delimiters and three parts. In all other cases, it +// will return nil parts and false. +func splitToken(token string) ([]string, bool) { + parts := make([]string, 3) + header, remain, ok := strings.Cut(token, tokenDelimiter) + if !ok { + return nil, false + } + parts[0] = header + claims, remain, ok := strings.Cut(remain, tokenDelimiter) + if !ok { + return nil, false + } + parts[1] = claims + // One more cut to ensure the signature is the last part of the token and there are no more + // delimiters. This avoids an issue where malicious input could contain additional delimiters + // causing unecessary overhead parsing tokens. + signature, _, unexpected := strings.Cut(remain, tokenDelimiter) + if unexpected { + return nil, false + } + parts[2] = signature + + return parts, true +} diff --git a/vendor/github.com/google/go-github/v62/github/orgs_personal_access_tokens.go b/vendor/github.com/google/go-github/v62/github/orgs_personal_access_tokens.go deleted file mode 100644 index 0d786114f..000000000 --- a/vendor/github.com/google/go-github/v62/github/orgs_personal_access_tokens.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2023 The go-github AUTHORS. All rights reserved. 
-// -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package github - -import ( - "context" - "fmt" - "net/http" -) - -// ReviewPersonalAccessTokenRequestOptions specifies the parameters to the ReviewPersonalAccessTokenRequest method. -type ReviewPersonalAccessTokenRequestOptions struct { - Action string `json:"action"` - Reason *string `json:"reason,omitempty"` -} - -// ReviewPersonalAccessTokenRequest approves or denies a pending request to access organization resources via a fine-grained personal access token. -// Only GitHub Apps can call this API, using the `organization_personal_access_token_requests: write` permission. -// `action` can be one of `approve` or `deny`. -// -// GitHub API docs: https://docs.github.com/rest/orgs/personal-access-tokens#review-a-request-to-access-organization-resources-with-a-fine-grained-personal-access-token -// -//meta:operation POST /orgs/{org}/personal-access-token-requests/{pat_request_id} -func (s *OrganizationsService) ReviewPersonalAccessTokenRequest(ctx context.Context, org string, requestID int64, opts ReviewPersonalAccessTokenRequestOptions) (*Response, error) { - u := fmt.Sprintf("orgs/%v/personal-access-token-requests/%v", org, requestID) - - req, err := s.client.NewRequest(http.MethodPost, u, &opts) - if err != nil { - return nil, err - } - - return s.client.Do(ctx, req, nil) -} diff --git a/vendor/github.com/google/go-github/v62/AUTHORS b/vendor/github.com/google/go-github/v66/AUTHORS similarity index 90% rename from vendor/github.com/google/go-github/v62/AUTHORS rename to vendor/github.com/google/go-github/v66/AUTHORS index 0197b94a2..d21164509 100644 --- a/vendor/github.com/google/go-github/v62/AUTHORS +++ b/vendor/github.com/google/go-github/v66/AUTHORS @@ -13,7 +13,10 @@ 413x 6543 <6543@obermui.de> Abed Kibbe +Abhijit Hota Abhinav Gupta +abhishek +Abhishek Sharma Abhishek Veeramalla aboy Adam Kohring @@ -35,19 +38,24 @@ Alex Orr Alex Su Alex Unger 
Alexander Harkness +Alexey Alekhin Alexis Gauthiez Ali Farooq +Alin Balutoiu Allan Guwatudde Allen Sun Amey Sakhadeo Anders Janmyr +Andreas Deininger Andreas Garnæs Andrew Ryabchun Andrew Svoboda +Andriyun Andy Grunwald Andy Hume Andy Lindeman angie pinilla +Anish Rajan anjanashenoy Anshuman Bhartiya Antoine @@ -59,6 +67,7 @@ aprp apurwaj2 Aravind Arda Kuyumcu +Ary Arıl Bozoluk Asier Marruedo Austin Burdine @@ -66,9 +75,11 @@ Austin Dizzy Azuka Okuleye Ben Batha Benjamen Keroack +Benjamin Nater Berkay Tacyildiz Beshr Kayali Beyang Liu +billnapier Billy Keyes Billy Lynch Bingtan Lu @@ -93,6 +104,7 @@ Cami Diez Carl Johnson Carlos Alexandro Becker Carlos Tadeu Panato Junior +Casey ChandanChainani chandresh-pancholi Charles Fenwick Elliott @@ -109,6 +121,7 @@ Christian Muehlhaeuser Christoph Jerolimov Christoph Sassenberg CI Monk +Clemens W Colin Misare Craig Gumbley Craig Peterson @@ -137,6 +150,7 @@ DeviousLab Dhi Aurrahman Diego Lapiduz Diogo Vilela +Dion Gionet Mallet Dmitri Shuralyov dmnlk Don Petersen @@ -149,9 +163,12 @@ Eli Uriegas Elliott Beach Emerson Wood Emil V +Emma Sax Eng Zer Jun +Enrico Candino eperm Erick Fejta +Erik Elkins Erik Nobel erwinvaneyk Evan Anderson @@ -163,6 +180,7 @@ Federico Di Pierro Felix Geisendörfer Filippo Valsorda Florian Forster +Florian Maier Florian Wagner Francesc Gil Francis @@ -178,6 +196,7 @@ Glen Mailer Gnahz Google Inc. 
Grachev Mikhail +Gregory Oschwald griffin_stewie guangwu Guillaume Jacquet @@ -202,6 +221,7 @@ ishan upadhyay isqua Jacob Valdemar Jake Krammer +Jake Scaltreto Jake White Jameel Haffejee James Bowes @@ -210,6 +230,7 @@ James Loh James Maguire James Turley Jamie West +Jan Guth Jan Kosecki Jan Švábík Jason Field @@ -224,6 +245,7 @@ Jihoon Chung Jille Timmermans Jimmi Dyson Joan Saum +JoannaaKL Joe Tsai John Barton John Engelman @@ -232,6 +254,7 @@ John Liu Jordan Brockopp Jordan Burandt Jordan Sussman +Jorge Ferrero Jorge Gómez Reus Joshua Bezaleel Abednego João Cerqueira @@ -240,6 +263,7 @@ jpbelanger-mtl Juan Juan Basso Julien Garcia Gonzalez +Julien Midedji Julien Rostand Junya Kono Justin Abrahms @@ -257,6 +281,7 @@ Kevin Burke Kevin Wang Kevin Zhao kgalli +Khanh Ngo Kirill Konrad Malawski Kookheon Kwon @@ -266,10 +291,12 @@ Kshitij Saraogi Kumar Saurabh Kyle Kurz kyokomi +Lachlan Cooper Lars Lehtonen Laurent Verdoïa leopoldwang Liam Galvin +Liam Stanley Lluis Campos Lovro Mažgon Loïs Postula @@ -283,6 +310,8 @@ Luke Kysow Luke Roberts Luke Young lynn [they] +Léo Salé +M. 
Ryan Rigdon Magnus Kulke Maksim Zhylinski Marc Binder @@ -294,6 +323,7 @@ Martins Sipenko Marwan Sulaiman Masayuki Izumi Mat Geist +Matheus Santos Araújo Matija Horvat Matin Rahmanian Matt @@ -301,6 +331,7 @@ Matt Brender Matt Dainty Matt Gaunt Matt Landis +Matt Mencel Matt Moore Matt Simons Matthew Reidy @@ -312,8 +343,11 @@ Michał Glapa Michelangelo Morrillo Miguel Elias dos Santos Mike Chen +Mishin Nikolai mohammad ali <2018cs92@student.uet.edu.pk> Mohammed AlDujaili +Mohammed Nafees +Mudit Mukundan Senthil Munia Balayil Mustafa Abban @@ -321,16 +355,20 @@ Nadav Kaner Naoki Kanatani Nathan VanBenschoten Navaneeth Suresh +Nayeem Hasan Neal Caffery Neil O'Toole +Nicholas Herring Nick Miyake Nick Platt Nick Spragg Nicolas Chapurlat Nikhita Raghunath +Nikita Pivkin Nilesh Singh Noah Hanjun Lee Noah Zoschke +Noble Varghese ns-cweber nxya Ole Orhagen @@ -358,6 +396,7 @@ Pierce McEntagart Pierre Carrier Piotr Zurek Piyush Chugh +Pj Meyer Pratik Mallya Qais Patankar Quang Le Hong @@ -370,6 +409,7 @@ Rafael Aramizu Gomes Rajat Jindal Rajendra arora Rajkumar +Ramesh Gaikwad Ranbir Singh Ravi Shekhar Jethani RaviTeja Pothana @@ -379,18 +419,23 @@ Reetuparna Mukherjee reeves122 Reinier Timmer Renjith R +Rez Moss +Riaje Ricco Førgaard Richard de Vries Rob Figueiredo Rohit Upadhyay Rojan Dinc +Roming22 Ronak Jain Ronan Pelliard Ross Gustafson Ruben Vereecken +Rufina Talalaeva Russell Boley Ryan Leung Ryan Lower +Ryan Skidmore Ryo Nakao Saaarah Safwan Olaimat @@ -431,6 +476,7 @@ Steve Teuber Stian Eikeland Suhaib Mujahid sushmita wable +Sven Palberg Szymon Kodrebski Søren Hansen T.J. 
Corrigan @@ -451,6 +497,9 @@ Tingluo Huang tkhandel Tobias Gesellchen Tom Payne +Tomasz Adam Skrzypczak +tomfeigin +Travis Tomsu Trey Tacon tsbkw ttacon @@ -482,7 +531,8 @@ Yurii Soldak Yusef Mohamadi Yusuke Kuoka Zach Latta +Ze Peng zhouhaibing089 六开箱 缘生 -蒋航 \ No newline at end of file +蒋航 diff --git a/vendor/github.com/google/go-github/v62/LICENSE b/vendor/github.com/google/go-github/v66/LICENSE similarity index 100% rename from vendor/github.com/google/go-github/v62/LICENSE rename to vendor/github.com/google/go-github/v66/LICENSE diff --git a/vendor/github.com/google/go-github/v62/github/actions.go b/vendor/github.com/google/go-github/v66/github/actions.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions.go rename to vendor/github.com/google/go-github/v66/github/actions.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_artifacts.go b/vendor/github.com/google/go-github/v66/github/actions_artifacts.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_artifacts.go rename to vendor/github.com/google/go-github/v66/github/actions_artifacts.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_cache.go b/vendor/github.com/google/go-github/v66/github/actions_cache.go similarity index 98% rename from vendor/github.com/google/go-github/v62/github/actions_cache.go rename to vendor/github.com/google/go-github/v66/github/actions_cache.go index 271d7d820..852e9860f 100644 --- a/vendor/github.com/google/go-github/v62/github/actions_cache.go +++ b/vendor/github.com/google/go-github/v66/github/actions_cache.go @@ -173,7 +173,7 @@ func (s *ActionsService) GetCacheUsageForRepo(ctx context.Context, owner, repo s // refreshed approximately every 5 minutes, so values returned from this endpoint may take at least 5 minutes to get updated. // // Permissions: You must authenticate using an access token with the read:org scope to use this endpoint. 
-// GitHub Apps must have the organization_admistration:read permission to use this endpoint. +// GitHub Apps must have the organization_administration:read permission to use this endpoint. // // GitHub API docs: https://docs.github.com/rest/actions/cache#list-repositories-with-github-actions-cache-usage-for-an-organization // @@ -203,7 +203,7 @@ func (s *ActionsService) ListCacheUsageByRepoForOrg(ctx context.Context, org str // 5 minutes, so values returned from this endpoint may take at least 5 minutes to get updated. // // Permissions: You must authenticate using an access token with the read:org scope to use this endpoint. -// GitHub Apps must have the organization_admistration:read permission to use this endpoint. +// GitHub Apps must have the organization_administration:read permission to use this endpoint. // // GitHub API docs: https://docs.github.com/rest/actions/cache#get-github-actions-cache-usage-for-an-organization // diff --git a/vendor/github.com/google/go-github/v62/github/actions_oidc.go b/vendor/github.com/google/go-github/v66/github/actions_oidc.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_oidc.go rename to vendor/github.com/google/go-github/v66/github/actions_oidc.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_permissions_enterprise.go b/vendor/github.com/google/go-github/v66/github/actions_permissions_enterprise.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_permissions_enterprise.go rename to vendor/github.com/google/go-github/v66/github/actions_permissions_enterprise.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_permissions_orgs.go b/vendor/github.com/google/go-github/v66/github/actions_permissions_orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_permissions_orgs.go rename to vendor/github.com/google/go-github/v66/github/actions_permissions_orgs.go diff --git 
a/vendor/github.com/google/go-github/v62/github/actions_required_workflows.go b/vendor/github.com/google/go-github/v66/github/actions_required_workflows.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_required_workflows.go rename to vendor/github.com/google/go-github/v66/github/actions_required_workflows.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_runner_groups.go b/vendor/github.com/google/go-github/v66/github/actions_runner_groups.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_runner_groups.go rename to vendor/github.com/google/go-github/v66/github/actions_runner_groups.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_runners.go b/vendor/github.com/google/go-github/v66/github/actions_runners.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_runners.go rename to vendor/github.com/google/go-github/v66/github/actions_runners.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_secrets.go b/vendor/github.com/google/go-github/v66/github/actions_secrets.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_secrets.go rename to vendor/github.com/google/go-github/v66/github/actions_secrets.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_variables.go b/vendor/github.com/google/go-github/v66/github/actions_variables.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_variables.go rename to vendor/github.com/google/go-github/v66/github/actions_variables.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_workflow_jobs.go b/vendor/github.com/google/go-github/v66/github/actions_workflow_jobs.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_workflow_jobs.go rename to 
vendor/github.com/google/go-github/v66/github/actions_workflow_jobs.go diff --git a/vendor/github.com/google/go-github/v62/github/actions_workflow_runs.go b/vendor/github.com/google/go-github/v66/github/actions_workflow_runs.go similarity index 84% rename from vendor/github.com/google/go-github/v62/github/actions_workflow_runs.go rename to vendor/github.com/google/go-github/v66/github/actions_workflow_runs.go index bc7afe9e9..122ea1d0e 100644 --- a/vendor/github.com/google/go-github/v62/github/actions_workflow_runs.go +++ b/vendor/github.com/google/go-github/v66/github/actions_workflow_runs.go @@ -19,6 +19,7 @@ type WorkflowRun struct { NodeID *string `json:"node_id,omitempty"` HeadBranch *string `json:"head_branch,omitempty"` HeadSHA *string `json:"head_sha,omitempty"` + Path *string `json:"path,omitempty"` RunNumber *int `json:"run_number,omitempty"` RunAttempt *int `json:"run_attempt,omitempty"` Event *string `json:"event,omitempty"` @@ -111,6 +112,31 @@ type ReferencedWorkflow struct { Ref *string `json:"ref,omitempty"` } +// PendingDeployment represents the pending_deployments response. +type PendingDeployment struct { + Environment *PendingDeploymentEnvironment `json:"environment,omitempty"` + WaitTimer *int64 `json:"wait_timer,omitempty"` + WaitTimerStartedAt *Timestamp `json:"wait_timer_started_at,omitempty"` + CurrentUserCanApprove *bool `json:"current_user_can_approve,omitempty"` + Reviewers []*RequiredReviewer `json:"reviewers,omitempty"` +} + +// PendingDeploymentEnvironment represents pending deployment environment properties. +type PendingDeploymentEnvironment struct { + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Name *string `json:"name,omitempty"` + URL *string `json:"url,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` +} + +// ReviewCustomDeploymentProtectionRuleRequest specifies the parameters to ReviewCustomDeploymentProtectionRule. 
+type ReviewCustomDeploymentProtectionRuleRequest struct { + EnvironmentName string `json:"environment_name"` + State string `json:"state"` + Comment string `json:"comment"` +} + func (s *ActionsService) listWorkflowRuns(ctx context.Context, endpoint string, opts *ListWorkflowRunsOptions) (*WorkflowRuns, *Response, error) { u, err := addOptions(endpoint, opts) if err != nil { @@ -387,6 +413,28 @@ func (s *ActionsService) GetWorkflowRunUsageByID(ctx context.Context, owner, rep return workflowRunUsage, resp, nil } +// GetPendingDeployments get all deployment environments for a workflow run that are waiting for protection rules to pass. +// +// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#get-pending-deployments-for-a-workflow-run +// +//meta:operation GET /repos/{owner}/{repo}/actions/runs/{run_id}/pending_deployments +func (s *ActionsService) GetPendingDeployments(ctx context.Context, owner, repo string, runID int64) ([]*PendingDeployment, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/pending_deployments", owner, repo, runID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var deployments []*PendingDeployment + resp, err := s.client.Do(ctx, req, &deployments) + if err != nil { + return nil, resp, err + } + + return deployments, resp, nil +} + // PendingDeployments approve or reject pending deployments that are waiting on approval by a required reviewer. // // GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#review-pending-deployments-for-a-workflow-run @@ -408,3 +456,20 @@ func (s *ActionsService) PendingDeployments(ctx context.Context, owner, repo str return deployments, resp, nil } + +// ReviewCustomDeploymentProtectionRule approves or rejects custom deployment protection rules provided by a GitHub App for a workflow run. 
+// +// GitHub API docs: https://docs.github.com/rest/actions/workflow-runs#review-custom-deployment-protection-rules-for-a-workflow-run +// +//meta:operation POST /repos/{owner}/{repo}/actions/runs/{run_id}/deployment_protection_rule +func (s *ActionsService) ReviewCustomDeploymentProtectionRule(ctx context.Context, owner, repo string, runID int64, request *ReviewCustomDeploymentProtectionRuleRequest) (*Response, error) { + u := fmt.Sprintf("repos/%v/%v/actions/runs/%v/deployment_protection_rule", owner, repo, runID) + + req, err := s.client.NewRequest("POST", u, request) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + return resp, err +} diff --git a/vendor/github.com/google/go-github/v62/github/actions_workflows.go b/vendor/github.com/google/go-github/v66/github/actions_workflows.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/actions_workflows.go rename to vendor/github.com/google/go-github/v66/github/actions_workflows.go diff --git a/vendor/github.com/google/go-github/v62/github/activity.go b/vendor/github.com/google/go-github/v66/github/activity.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/activity.go rename to vendor/github.com/google/go-github/v66/github/activity.go diff --git a/vendor/github.com/google/go-github/v62/github/activity_events.go b/vendor/github.com/google/go-github/v66/github/activity_events.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/activity_events.go rename to vendor/github.com/google/go-github/v66/github/activity_events.go diff --git a/vendor/github.com/google/go-github/v62/github/activity_notifications.go b/vendor/github.com/google/go-github/v66/github/activity_notifications.go similarity index 92% rename from vendor/github.com/google/go-github/v62/github/activity_notifications.go rename to vendor/github.com/google/go-github/v66/github/activity_notifications.go index 47f22261d..e712323ed 
100644 --- a/vendor/github.com/google/go-github/v62/github/activity_notifications.go +++ b/vendor/github.com/google/go-github/v66/github/activity_notifications.go @@ -178,6 +178,23 @@ func (s *ActivityService) MarkThreadRead(ctx context.Context, id string) (*Respo return s.client.Do(ctx, req, nil) } +// MarkThreadDone marks the specified thread as done. +// Marking a thread as "done" is equivalent to marking a notification in your notification inbox on GitHub as done. +// +// GitHub API docs: https://docs.github.com/rest/activity/notifications#mark-a-thread-as-done +// +//meta:operation DELETE /notifications/threads/{thread_id} +func (s *ActivityService) MarkThreadDone(ctx context.Context, id int64) (*Response, error) { + u := fmt.Sprintf("notifications/threads/%v", id) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + // GetThreadSubscription checks to see if the authenticated user is subscribed // to a thread. 
// diff --git a/vendor/github.com/google/go-github/v62/github/activity_star.go b/vendor/github.com/google/go-github/v66/github/activity_star.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/activity_star.go rename to vendor/github.com/google/go-github/v66/github/activity_star.go diff --git a/vendor/github.com/google/go-github/v62/github/activity_watching.go b/vendor/github.com/google/go-github/v66/github/activity_watching.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/activity_watching.go rename to vendor/github.com/google/go-github/v66/github/activity_watching.go diff --git a/vendor/github.com/google/go-github/v62/github/admin.go b/vendor/github.com/google/go-github/v66/github/admin.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/admin.go rename to vendor/github.com/google/go-github/v66/github/admin.go diff --git a/vendor/github.com/google/go-github/v62/github/admin_orgs.go b/vendor/github.com/google/go-github/v66/github/admin_orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/admin_orgs.go rename to vendor/github.com/google/go-github/v66/github/admin_orgs.go diff --git a/vendor/github.com/google/go-github/v62/github/admin_stats.go b/vendor/github.com/google/go-github/v66/github/admin_stats.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/admin_stats.go rename to vendor/github.com/google/go-github/v66/github/admin_stats.go diff --git a/vendor/github.com/google/go-github/v62/github/admin_users.go b/vendor/github.com/google/go-github/v66/github/admin_users.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/admin_users.go rename to vendor/github.com/google/go-github/v66/github/admin_users.go diff --git a/vendor/github.com/google/go-github/v62/github/apps.go b/vendor/github.com/google/go-github/v66/github/apps.go similarity index 81% rename from 
vendor/github.com/google/go-github/v62/github/apps.go rename to vendor/github.com/google/go-github/v66/github/apps.go index 6969daba6..2f74c2c26 100644 --- a/vendor/github.com/google/go-github/v62/github/apps.go +++ b/vendor/github.com/google/go-github/v66/github/apps.go @@ -77,44 +77,48 @@ type InstallationTokenListRepoOptions struct { // https://docs.github.com/enterprise-server@3.0/rest/apps#create-an-installation-access-token-for-an-app // https://docs.github.com/rest/apps#create-an-installation-access-token-for-an-app type InstallationPermissions struct { - Actions *string `json:"actions,omitempty"` - Administration *string `json:"administration,omitempty"` - Blocking *string `json:"blocking,omitempty"` - Checks *string `json:"checks,omitempty"` - Contents *string `json:"contents,omitempty"` - ContentReferences *string `json:"content_references,omitempty"` - Deployments *string `json:"deployments,omitempty"` - Emails *string `json:"emails,omitempty"` - Environments *string `json:"environments,omitempty"` - Followers *string `json:"followers,omitempty"` - Issues *string `json:"issues,omitempty"` - Metadata *string `json:"metadata,omitempty"` - Members *string `json:"members,omitempty"` - OrganizationAdministration *string `json:"organization_administration,omitempty"` - OrganizationCustomProperties *string `json:"organization_custom_properties,omitempty"` - OrganizationCustomRoles *string `json:"organization_custom_roles,omitempty"` - OrganizationHooks *string `json:"organization_hooks,omitempty"` - OrganizationPackages *string `json:"organization_packages,omitempty"` - OrganizationPlan *string `json:"organization_plan,omitempty"` - OrganizationPreReceiveHooks *string `json:"organization_pre_receive_hooks,omitempty"` - OrganizationProjects *string `json:"organization_projects,omitempty"` - OrganizationSecrets *string `json:"organization_secrets,omitempty"` - OrganizationSelfHostedRunners *string `json:"organization_self_hosted_runners,omitempty"` - 
OrganizationUserBlocking *string `json:"organization_user_blocking,omitempty"` - Packages *string `json:"packages,omitempty"` - Pages *string `json:"pages,omitempty"` - PullRequests *string `json:"pull_requests,omitempty"` - RepositoryHooks *string `json:"repository_hooks,omitempty"` - RepositoryProjects *string `json:"repository_projects,omitempty"` - RepositoryPreReceiveHooks *string `json:"repository_pre_receive_hooks,omitempty"` - Secrets *string `json:"secrets,omitempty"` - SecretScanningAlerts *string `json:"secret_scanning_alerts,omitempty"` - SecurityEvents *string `json:"security_events,omitempty"` - SingleFile *string `json:"single_file,omitempty"` - Statuses *string `json:"statuses,omitempty"` - TeamDiscussions *string `json:"team_discussions,omitempty"` - VulnerabilityAlerts *string `json:"vulnerability_alerts,omitempty"` - Workflows *string `json:"workflows,omitempty"` + Actions *string `json:"actions,omitempty"` + ActionsVariables *string `json:"actions_variables,omitempty"` + Administration *string `json:"administration,omitempty"` + Blocking *string `json:"blocking,omitempty"` + Checks *string `json:"checks,omitempty"` + Contents *string `json:"contents,omitempty"` + ContentReferences *string `json:"content_references,omitempty"` + Deployments *string `json:"deployments,omitempty"` + Emails *string `json:"emails,omitempty"` + Environments *string `json:"environments,omitempty"` + Followers *string `json:"followers,omitempty"` + Issues *string `json:"issues,omitempty"` + Metadata *string `json:"metadata,omitempty"` + Members *string `json:"members,omitempty"` + OrganizationAdministration *string `json:"organization_administration,omitempty"` + OrganizationCustomProperties *string `json:"organization_custom_properties,omitempty"` + OrganizationCustomRoles *string `json:"organization_custom_roles,omitempty"` + OrganizationCustomOrgRoles *string `json:"organization_custom_org_roles,omitempty"` + OrganizationHooks *string 
`json:"organization_hooks,omitempty"` + OrganizationPackages *string `json:"organization_packages,omitempty"` + OrganizationPersonalAccessTokens *string `json:"organization_personal_access_tokens,omitempty"` + OrganizationPersonalAccessTokenRequests *string `json:"organization_personal_access_token_requests,omitempty"` + OrganizationPlan *string `json:"organization_plan,omitempty"` + OrganizationPreReceiveHooks *string `json:"organization_pre_receive_hooks,omitempty"` + OrganizationProjects *string `json:"organization_projects,omitempty"` + OrganizationSecrets *string `json:"organization_secrets,omitempty"` + OrganizationSelfHostedRunners *string `json:"organization_self_hosted_runners,omitempty"` + OrganizationUserBlocking *string `json:"organization_user_blocking,omitempty"` + Packages *string `json:"packages,omitempty"` + Pages *string `json:"pages,omitempty"` + PullRequests *string `json:"pull_requests,omitempty"` + RepositoryHooks *string `json:"repository_hooks,omitempty"` + RepositoryProjects *string `json:"repository_projects,omitempty"` + RepositoryPreReceiveHooks *string `json:"repository_pre_receive_hooks,omitempty"` + Secrets *string `json:"secrets,omitempty"` + SecretScanningAlerts *string `json:"secret_scanning_alerts,omitempty"` + SecurityEvents *string `json:"security_events,omitempty"` + SingleFile *string `json:"single_file,omitempty"` + Statuses *string `json:"statuses,omitempty"` + TeamDiscussions *string `json:"team_discussions,omitempty"` + VulnerabilityAlerts *string `json:"vulnerability_alerts,omitempty"` + Workflows *string `json:"workflows,omitempty"` } // InstallationRequest represents a pending GitHub App installation request. 
diff --git a/vendor/github.com/google/go-github/v62/github/apps_hooks.go b/vendor/github.com/google/go-github/v66/github/apps_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/apps_hooks.go rename to vendor/github.com/google/go-github/v66/github/apps_hooks.go diff --git a/vendor/github.com/google/go-github/v62/github/apps_hooks_deliveries.go b/vendor/github.com/google/go-github/v66/github/apps_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/apps_hooks_deliveries.go rename to vendor/github.com/google/go-github/v66/github/apps_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v62/github/apps_installation.go b/vendor/github.com/google/go-github/v66/github/apps_installation.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/apps_installation.go rename to vendor/github.com/google/go-github/v66/github/apps_installation.go diff --git a/vendor/github.com/google/go-github/v62/github/apps_manifest.go b/vendor/github.com/google/go-github/v66/github/apps_manifest.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/apps_manifest.go rename to vendor/github.com/google/go-github/v66/github/apps_manifest.go diff --git a/vendor/github.com/google/go-github/v62/github/apps_marketplace.go b/vendor/github.com/google/go-github/v66/github/apps_marketplace.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/apps_marketplace.go rename to vendor/github.com/google/go-github/v66/github/apps_marketplace.go diff --git a/vendor/github.com/google/go-github/v62/github/authorizations.go b/vendor/github.com/google/go-github/v66/github/authorizations.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/authorizations.go rename to vendor/github.com/google/go-github/v66/github/authorizations.go diff --git a/vendor/github.com/google/go-github/v62/github/billing.go 
b/vendor/github.com/google/go-github/v66/github/billing.go similarity index 93% rename from vendor/github.com/google/go-github/v62/github/billing.go rename to vendor/github.com/google/go-github/v66/github/billing.go index 6d7579b88..09b1a5d01 100644 --- a/vendor/github.com/google/go-github/v62/github/billing.go +++ b/vendor/github.com/google/go-github/v66/github/billing.go @@ -43,8 +43,11 @@ type StorageBilling struct { // ActiveCommitters represents the total active committers across all repositories in an Organization. type ActiveCommitters struct { - TotalAdvancedSecurityCommitters int `json:"total_advanced_security_committers"` - Repositories []*RepositoryActiveCommitters `json:"repositories,omitempty"` + TotalAdvancedSecurityCommitters int `json:"total_advanced_security_committers"` + TotalCount int `json:"total_count"` + MaximumAdvancedSecurityCommitters int `json:"maximum_advanced_security_committers"` + PurchasedAdvancedSecurityCommitters int `json:"purchased_advanced_security_committers"` + Repositories []*RepositoryActiveCommitters `json:"repositories,omitempty"` } // RepositoryActiveCommitters represents active committers on each repository. diff --git a/vendor/github.com/google/go-github/v62/github/checks.go b/vendor/github.com/google/go-github/v66/github/checks.go similarity index 98% rename from vendor/github.com/google/go-github/v62/github/checks.go rename to vendor/github.com/google/go-github/v66/github/checks.go index a86189445..71e50c15f 100644 --- a/vendor/github.com/google/go-github/v62/github/checks.go +++ b/vendor/github.com/google/go-github/v66/github/checks.go @@ -85,7 +85,10 @@ type CheckSuite struct { PullRequests []*PullRequest `json:"pull_requests,omitempty"` // The following fields are only populated by Webhook events. 
- HeadCommit *Commit `json:"head_commit,omitempty"` + HeadCommit *Commit `json:"head_commit,omitempty"` + LatestCheckRunsCount *int64 `json:"latest_check_runs_count,omitempty"` + Rerequstable *bool `json:"rerequestable,omitempty"` + RunsRerequstable *bool `json:"runs_rerequestable,omitempty"` } func (c CheckRun) String() string { diff --git a/vendor/github.com/google/go-github/v62/github/code-scanning.go b/vendor/github.com/google/go-github/v66/github/code-scanning.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/code-scanning.go rename to vendor/github.com/google/go-github/v66/github/code-scanning.go diff --git a/vendor/github.com/google/go-github/v62/github/codesofconduct.go b/vendor/github.com/google/go-github/v66/github/codesofconduct.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/codesofconduct.go rename to vendor/github.com/google/go-github/v66/github/codesofconduct.go diff --git a/vendor/github.com/google/go-github/v62/github/codespaces.go b/vendor/github.com/google/go-github/v66/github/codespaces.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/codespaces.go rename to vendor/github.com/google/go-github/v66/github/codespaces.go diff --git a/vendor/github.com/google/go-github/v62/github/codespaces_secrets.go b/vendor/github.com/google/go-github/v66/github/codespaces_secrets.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/codespaces_secrets.go rename to vendor/github.com/google/go-github/v66/github/codespaces_secrets.go diff --git a/vendor/github.com/google/go-github/v62/github/copilot.go b/vendor/github.com/google/go-github/v66/github/copilot.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/copilot.go rename to vendor/github.com/google/go-github/v66/github/copilot.go diff --git a/vendor/github.com/google/go-github/v62/github/dependabot.go 
b/vendor/github.com/google/go-github/v66/github/dependabot.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/dependabot.go rename to vendor/github.com/google/go-github/v66/github/dependabot.go diff --git a/vendor/github.com/google/go-github/v62/github/dependabot_alerts.go b/vendor/github.com/google/go-github/v66/github/dependabot_alerts.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/dependabot_alerts.go rename to vendor/github.com/google/go-github/v66/github/dependabot_alerts.go diff --git a/vendor/github.com/google/go-github/v62/github/dependabot_secrets.go b/vendor/github.com/google/go-github/v66/github/dependabot_secrets.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/dependabot_secrets.go rename to vendor/github.com/google/go-github/v66/github/dependabot_secrets.go diff --git a/vendor/github.com/google/go-github/v62/github/dependency_graph.go b/vendor/github.com/google/go-github/v66/github/dependency_graph.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/dependency_graph.go rename to vendor/github.com/google/go-github/v66/github/dependency_graph.go diff --git a/vendor/github.com/google/go-github/v62/github/dependency_graph_snapshots.go b/vendor/github.com/google/go-github/v66/github/dependency_graph_snapshots.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/dependency_graph_snapshots.go rename to vendor/github.com/google/go-github/v66/github/dependency_graph_snapshots.go diff --git a/vendor/github.com/google/go-github/v62/github/doc.go b/vendor/github.com/google/go-github/v66/github/doc.go similarity index 98% rename from vendor/github.com/google/go-github/v62/github/doc.go rename to vendor/github.com/google/go-github/v66/github/doc.go index e0c810871..7196394fb 100644 --- a/vendor/github.com/google/go-github/v62/github/doc.go +++ b/vendor/github.com/google/go-github/v66/github/doc.go @@ -8,7 
+8,7 @@ Package github provides a client for using the GitHub API. Usage: - import "github.com/google/go-github/v62/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) + import "github.com/google/go-github/v66/github" // with go modules enabled (GO111MODULE=on or outside GOPATH) import "github.com/google/go-github/github" // with go modules disabled Construct a new GitHub client, then use the various services on the client to @@ -31,7 +31,7 @@ The services of a client divide the API into logical chunks and correspond to the structure of the GitHub API documentation at https://docs.github.com/rest . -NOTE: Using the https://godoc.org/context package, one can easily +NOTE: Using the https://pkg.go.dev/context package, one can easily pass cancelation signals and deadlines to various services of the client for handling a request. In case there is no context available, then context.Background() can be used as a starting point. diff --git a/vendor/github.com/google/go-github/v62/github/emojis.go b/vendor/github.com/google/go-github/v66/github/emojis.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/emojis.go rename to vendor/github.com/google/go-github/v66/github/emojis.go diff --git a/vendor/github.com/google/go-github/v62/github/enterprise.go b/vendor/github.com/google/go-github/v66/github/enterprise.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/enterprise.go rename to vendor/github.com/google/go-github/v66/github/enterprise.go diff --git a/vendor/github.com/google/go-github/v62/github/enterprise_actions_runner_groups.go b/vendor/github.com/google/go-github/v66/github/enterprise_actions_runner_groups.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/enterprise_actions_runner_groups.go rename to vendor/github.com/google/go-github/v66/github/enterprise_actions_runner_groups.go diff --git 
a/vendor/github.com/google/go-github/v62/github/enterprise_actions_runners.go b/vendor/github.com/google/go-github/v66/github/enterprise_actions_runners.go similarity index 83% rename from vendor/github.com/google/go-github/v62/github/enterprise_actions_runners.go rename to vendor/github.com/google/go-github/v66/github/enterprise_actions_runners.go index 4a6e6b52c..fa345aea8 100644 --- a/vendor/github.com/google/go-github/v62/github/enterprise_actions_runners.go +++ b/vendor/github.com/google/go-github/v66/github/enterprise_actions_runners.go @@ -80,7 +80,7 @@ func (s *EnterpriseService) CreateRegistrationToken(ctx context.Context, enterpr // GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/actions/self-hosted-runners#list-self-hosted-runners-for-an-enterprise // //meta:operation GET /enterprises/{enterprise}/actions/runners -func (s *EnterpriseService) ListRunners(ctx context.Context, enterprise string, opts *ListOptions) (*Runners, *Response, error) { +func (s *EnterpriseService) ListRunners(ctx context.Context, enterprise string, opts *ListRunnersOptions) (*Runners, *Response, error) { u := fmt.Sprintf("enterprises/%v/actions/runners", enterprise) u, err := addOptions(u, opts) if err != nil { @@ -101,6 +101,27 @@ func (s *EnterpriseService) ListRunners(ctx context.Context, enterprise string, return runners, resp, nil } +// GetRunner gets a specific self-hosted runner configured in an enterprise. 
+// +// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/actions/self-hosted-runners#get-a-self-hosted-runner-for-an-enterprise +// +//meta:operation GET /enterprises/{enterprise}/actions/runners/{runner_id} +func (s *EnterpriseService) GetRunner(ctx context.Context, enterprise string, runnerID int64) (*Runner, *Response, error) { + u := fmt.Sprintf("enterprises/%v/actions/runners/%v", enterprise, runnerID) + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + runner := new(Runner) + resp, err := s.client.Do(ctx, req, runner) + if err != nil { + return nil, resp, err + } + + return runner, resp, nil +} + // RemoveRunner forces the removal of a self-hosted runner from an enterprise using the runner id. // // GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/actions/self-hosted-runners#delete-a-self-hosted-runner-from-an-enterprise diff --git a/vendor/github.com/google/go-github/v62/github/enterprise_audit_log.go b/vendor/github.com/google/go-github/v66/github/enterprise_audit_log.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/enterprise_audit_log.go rename to vendor/github.com/google/go-github/v66/github/enterprise_audit_log.go diff --git a/vendor/github.com/google/go-github/v62/github/enterprise_code_security_and_analysis.go b/vendor/github.com/google/go-github/v66/github/enterprise_code_security_and_analysis.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/enterprise_code_security_and_analysis.go rename to vendor/github.com/google/go-github/v66/github/enterprise_code_security_and_analysis.go diff --git a/vendor/github.com/google/go-github/v62/github/event.go b/vendor/github.com/google/go-github/v66/github/event.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/event.go rename to vendor/github.com/google/go-github/v66/github/event.go diff --git 
a/vendor/github.com/google/go-github/v62/github/event_types.go b/vendor/github.com/google/go-github/v66/github/event_types.go similarity index 93% rename from vendor/github.com/google/go-github/v62/github/event_types.go rename to vendor/github.com/google/go-github/v66/github/event_types.go index e5ae33a5f..fbe56b20d 100644 --- a/vendor/github.com/google/go-github/v62/github/event_types.go +++ b/vendor/github.com/google/go-github/v66/github/event_types.go @@ -225,6 +225,46 @@ type DeploymentProtectionRuleEvent struct { Installation *Installation `json:"installation,omitempty"` } +// DeploymentReviewEvent represents a deployment review event. +// The Webhook event name is "deployment_review". +// +// GitHub API docs: https://docs.github.com/webhooks-and-events/webhooks/webhook-events-and-payloads?#deployment_review +type DeploymentReviewEvent struct { + // The action performed. Possible values are: "requested", "approved", or "rejected". + Action *string `json:"action,omitempty"` + + // The following will be populated only if requested. + Requester *User `json:"requester,omitempty"` + Environment *string `json:"environment,omitempty"` + + // The following will be populated only if approved or rejected. + Approver *User `json:"approver,omitempty"` + Comment *string `json:"comment,omitempty"` + WorkflowJobRuns []*WorkflowJobRun `json:"workflow_job_runs,omitempty"` + + Enterprise *Enterprise `json:"enterprise,omitempty"` + Installation *Installation `json:"installation,omitempty"` + Organization *Organization `json:"organization,omitempty"` + Repo *Repository `json:"repository,omitempty"` + Reviewers []*RequiredReviewer `json:"reviewers,omitempty"` + Sender *User `json:"sender,omitempty"` + Since *string `json:"since,omitempty"` + WorkflowJobRun *WorkflowJobRun `json:"workflow_job_run,omitempty"` + WorkflowRun *WorkflowRun `json:"workflow_run,omitempty"` +} + +// WorkflowJobRun represents a workflow_job_run in a GitHub DeploymentReviewEvent. 
+type WorkflowJobRun struct { + Conclusion *string `json:"conclusion,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + Environment *string `json:"environment,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` + ID *int64 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Status *string `json:"status,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` +} + // DeploymentStatusEvent represents a deployment status. // The Webhook event name is "deployment_status". // @@ -1348,44 +1388,44 @@ func (h HeadCommit) String() string { // PushEventRepository represents the repo object in a PushEvent payload. type PushEventRepository struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Name *string `json:"name,omitempty"` - FullName *string `json:"full_name,omitempty"` - Owner *User `json:"owner,omitempty"` - Private *bool `json:"private,omitempty"` - Description *string `json:"description,omitempty"` - Fork *bool `json:"fork,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PushedAt *Timestamp `json:"pushed_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - Homepage *string `json:"homepage,omitempty"` - PullsURL *string `json:"pulls_url,omitempty"` - Size *int `json:"size,omitempty"` - StargazersCount *int `json:"stargazers_count,omitempty"` - WatchersCount *int `json:"watchers_count,omitempty"` - Language *string `json:"language,omitempty"` - HasIssues *bool `json:"has_issues,omitempty"` - HasDownloads *bool `json:"has_downloads,omitempty"` - HasWiki *bool `json:"has_wiki,omitempty"` - HasPages *bool `json:"has_pages,omitempty"` - ForksCount *int `json:"forks_count,omitempty"` - Archived *bool `json:"archived,omitempty"` - Disabled *bool `json:"disabled,omitempty"` - OpenIssuesCount *int `json:"open_issues_count,omitempty"` - DefaultBranch *string `json:"default_branch,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - Organization 
*string `json:"organization,omitempty"` - URL *string `json:"url,omitempty"` - ArchiveURL *string `json:"archive_url,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - StatusesURL *string `json:"statuses_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - SSHURL *string `json:"ssh_url,omitempty"` - CloneURL *string `json:"clone_url,omitempty"` - SVNURL *string `json:"svn_url,omitempty"` - Topics []string `json:"topics,omitempty"` - CustomProperties map[string]string `json:"custom_properties,omitempty"` + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Name *string `json:"name,omitempty"` + FullName *string `json:"full_name,omitempty"` + Owner *User `json:"owner,omitempty"` + Private *bool `json:"private,omitempty"` + Description *string `json:"description,omitempty"` + Fork *bool `json:"fork,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + PushedAt *Timestamp `json:"pushed_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + Homepage *string `json:"homepage,omitempty"` + PullsURL *string `json:"pulls_url,omitempty"` + Size *int `json:"size,omitempty"` + StargazersCount *int `json:"stargazers_count,omitempty"` + WatchersCount *int `json:"watchers_count,omitempty"` + Language *string `json:"language,omitempty"` + HasIssues *bool `json:"has_issues,omitempty"` + HasDownloads *bool `json:"has_downloads,omitempty"` + HasWiki *bool `json:"has_wiki,omitempty"` + HasPages *bool `json:"has_pages,omitempty"` + ForksCount *int `json:"forks_count,omitempty"` + Archived *bool `json:"archived,omitempty"` + Disabled *bool `json:"disabled,omitempty"` + OpenIssuesCount *int `json:"open_issues_count,omitempty"` + DefaultBranch *string `json:"default_branch,omitempty"` + MasterBranch *string `json:"master_branch,omitempty"` + Organization *string `json:"organization,omitempty"` + URL *string `json:"url,omitempty"` + ArchiveURL *string `json:"archive_url,omitempty"` + HTMLURL *string 
`json:"html_url,omitempty"` + StatusesURL *string `json:"statuses_url,omitempty"` + GitURL *string `json:"git_url,omitempty"` + SSHURL *string `json:"ssh_url,omitempty"` + CloneURL *string `json:"clone_url,omitempty"` + SVNURL *string `json:"svn_url,omitempty"` + Topics []string `json:"topics,omitempty"` + CustomProperties map[string]interface{} `json:"custom_properties,omitempty"` } // PushEventRepoOwner is a basic representation of user/org in a PushEvent payload. @@ -1821,3 +1861,27 @@ type CodeScanningAlertEvent struct { Installation *Installation `json:"installation,omitempty"` } + +// SponsorshipEvent represents a sponsorship event in GitHub. +// +// GitHub API docs: https://docs.github.com/en/rest/overview/github-event-types?apiVersion=2022-11-28#sponsorshipevent +type SponsorshipEvent struct { + Action *string `json:"action,omitempty"` + EffectiveDate *string `json:"effective_date,omitempty"` + Changes *SponsorshipChanges `json:"changes,omitempty"` + Repository *Repository `json:"repository,omitempty"` + Organization *Organization `json:"organization,omitempty"` + Sender *User `json:"sender,omitempty"` + Installation *Installation `json:"installation,omitempty"` +} + +// SponsorshipChanges represents changes made to the sponsorship. +type SponsorshipChanges struct { + Tier *SponsorshipTier `json:"tier,omitempty"` + PrivacyLevel *string `json:"privacy_level,omitempty"` +} + +// SponsorshipTier represents the tier information of a sponsorship. 
+type SponsorshipTier struct { + From *string `json:"from,omitempty"` +} diff --git a/vendor/github.com/google/go-github/v62/github/gists.go b/vendor/github.com/google/go-github/v66/github/gists.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/gists.go rename to vendor/github.com/google/go-github/v66/github/gists.go diff --git a/vendor/github.com/google/go-github/v62/github/gists_comments.go b/vendor/github.com/google/go-github/v66/github/gists_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/gists_comments.go rename to vendor/github.com/google/go-github/v66/github/gists_comments.go diff --git a/vendor/github.com/google/go-github/v62/github/git.go b/vendor/github.com/google/go-github/v66/github/git.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/git.go rename to vendor/github.com/google/go-github/v66/github/git.go diff --git a/vendor/github.com/google/go-github/v62/github/git_blobs.go b/vendor/github.com/google/go-github/v66/github/git_blobs.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/git_blobs.go rename to vendor/github.com/google/go-github/v66/github/git_blobs.go diff --git a/vendor/github.com/google/go-github/v62/github/git_commits.go b/vendor/github.com/google/go-github/v66/github/git_commits.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/git_commits.go rename to vendor/github.com/google/go-github/v66/github/git_commits.go diff --git a/vendor/github.com/google/go-github/v62/github/git_refs.go b/vendor/github.com/google/go-github/v66/github/git_refs.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/git_refs.go rename to vendor/github.com/google/go-github/v66/github/git_refs.go diff --git a/vendor/github.com/google/go-github/v62/github/git_tags.go b/vendor/github.com/google/go-github/v66/github/git_tags.go similarity index 100% rename from 
vendor/github.com/google/go-github/v62/github/git_tags.go rename to vendor/github.com/google/go-github/v66/github/git_tags.go diff --git a/vendor/github.com/google/go-github/v62/github/git_trees.go b/vendor/github.com/google/go-github/v66/github/git_trees.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/git_trees.go rename to vendor/github.com/google/go-github/v66/github/git_trees.go diff --git a/vendor/github.com/google/go-github/v62/github/github-accessors.go b/vendor/github.com/google/go-github/v66/github/github-accessors.go similarity index 97% rename from vendor/github.com/google/go-github/v62/github/github-accessors.go rename to vendor/github.com/google/go-github/v66/github/github-accessors.go index fd2fe2259..f8e303c43 100644 --- a/vendor/github.com/google/go-github/v62/github/github-accessors.go +++ b/vendor/github.com/google/go-github/v66/github/github-accessors.go @@ -662,6 +662,14 @@ func (a *AnalysesListOptions) GetSarifID() string { return *a.SarifID } +// GetDomains returns the Domains field. +func (a *APIMeta) GetDomains() *APIMetaDomains { + if a == nil { + return nil + } + return a.Domains +} + // GetSSHKeyFingerprints returns the SSHKeyFingerprints map if it's non-nil, an empty map otherwise. func (a *APIMeta) GetSSHKeyFingerprints() map[string]string { if a == nil || a.SSHKeyFingerprints == nil { @@ -678,6 +686,14 @@ func (a *APIMeta) GetVerifiablePasswordAuthentication() bool { return *a.VerifiablePasswordAuthentication } +// GetArtifactAttestations returns the ArtifactAttestations field. +func (a *APIMetaDomains) GetArtifactAttestations() *APIMetaArtifactAttestations { + if a == nil { + return nil + } + return a.ArtifactAttestations +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
func (a *App) GetCreatedAt() Timestamp { if a == nil || a.CreatedAt == nil { @@ -2270,6 +2286,14 @@ func (c *CheckSuite) GetID() int64 { return *c.ID } +// GetLatestCheckRunsCount returns the LatestCheckRunsCount field if it's non-nil, zero value otherwise. +func (c *CheckSuite) GetLatestCheckRunsCount() int64 { + if c == nil || c.LatestCheckRunsCount == nil { + return 0 + } + return *c.LatestCheckRunsCount +} + // GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. func (c *CheckSuite) GetNodeID() string { if c == nil || c.NodeID == nil { @@ -2286,6 +2310,22 @@ func (c *CheckSuite) GetRepository() *Repository { return c.Repository } +// GetRerequstable returns the Rerequstable field if it's non-nil, zero value otherwise. +func (c *CheckSuite) GetRerequstable() bool { + if c == nil || c.Rerequstable == nil { + return false + } + return *c.Rerequstable +} + +// GetRunsRerequstable returns the RunsRerequstable field if it's non-nil, zero value otherwise. +func (c *CheckSuite) GetRunsRerequstable() bool { + if c == nil || c.RunsRerequstable == nil { + return false + } + return *c.RunsRerequstable +} + // GetStatus returns the Status field if it's non-nil, zero value otherwise. func (c *CheckSuite) GetStatus() string { if c == nil || c.Status == nil { @@ -4351,7 +4391,7 @@ func (c *CreateOrgInvitationOptions) GetRole() string { } // GetBaseRole returns the BaseRole field if it's non-nil, zero value otherwise. -func (c *CreateOrUpdateCustomRoleOptions) GetBaseRole() string { +func (c *CreateOrUpdateCustomRepoRoleOptions) GetBaseRole() string { if c == nil || c.BaseRole == nil { return "" } @@ -4359,7 +4399,7 @@ func (c *CreateOrUpdateCustomRoleOptions) GetBaseRole() string { } // GetDescription returns the Description field if it's non-nil, zero value otherwise. 
-func (c *CreateOrUpdateCustomRoleOptions) GetDescription() string { +func (c *CreateOrUpdateCustomRepoRoleOptions) GetDescription() string { if c == nil || c.Description == nil { return "" } @@ -4367,7 +4407,31 @@ func (c *CreateOrUpdateCustomRoleOptions) GetDescription() string { } // GetName returns the Name field if it's non-nil, zero value otherwise. -func (c *CreateOrUpdateCustomRoleOptions) GetName() string { +func (c *CreateOrUpdateCustomRepoRoleOptions) GetName() string { + if c == nil || c.Name == nil { + return "" + } + return *c.Name +} + +// GetBaseRole returns the BaseRole field if it's non-nil, zero value otherwise. +func (c *CreateOrUpdateOrgRoleOptions) GetBaseRole() string { + if c == nil || c.BaseRole == nil { + return "" + } + return *c.BaseRole +} + +// GetDescription returns the Description field if it's non-nil, zero value otherwise. +func (c *CreateOrUpdateOrgRoleOptions) GetDescription() string { + if c == nil || c.Description == nil { + return "" + } + return *c.Description +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (c *CreateOrUpdateOrgRoleOptions) GetName() string { if c == nil || c.Name == nil { return "" } @@ -4686,6 +4750,70 @@ func (c *CustomDeploymentProtectionRuleRequest) GetIntegrationID() int64 { return *c.IntegrationID } +// GetBaseRole returns the BaseRole field if it's non-nil, zero value otherwise. +func (c *CustomOrgRoles) GetBaseRole() string { + if c == nil || c.BaseRole == nil { + return "" + } + return *c.BaseRole +} + +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (c *CustomOrgRoles) GetCreatedAt() Timestamp { + if c == nil || c.CreatedAt == nil { + return Timestamp{} + } + return *c.CreatedAt +} + +// GetDescription returns the Description field if it's non-nil, zero value otherwise. 
+func (c *CustomOrgRoles) GetDescription() string { + if c == nil || c.Description == nil { + return "" + } + return *c.Description +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (c *CustomOrgRoles) GetID() int64 { + if c == nil || c.ID == nil { + return 0 + } + return *c.ID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (c *CustomOrgRoles) GetName() string { + if c == nil || c.Name == nil { + return "" + } + return *c.Name +} + +// GetOrg returns the Org field. +func (c *CustomOrgRoles) GetOrg() *Organization { + if c == nil { + return nil + } + return c.Org +} + +// GetSource returns the Source field if it's non-nil, zero value otherwise. +func (c *CustomOrgRoles) GetSource() string { + if c == nil || c.Source == nil { + return "" + } + return *c.Source +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (c *CustomOrgRoles) GetUpdatedAt() Timestamp { + if c == nil || c.UpdatedAt == nil { + return Timestamp{} + } + return *c.UpdatedAt +} + // GetDefaultValue returns the DefaultValue field if it's non-nil, zero value otherwise. func (c *CustomProperty) GetDefaultValue() string { if c == nil || c.DefaultValue == nil { @@ -4718,12 +4846,12 @@ func (c *CustomProperty) GetRequired() bool { return *c.Required } -// GetValue returns the Value field if it's non-nil, zero value otherwise. -func (c *CustomPropertyValue) GetValue() string { - if c == nil || c.Value == nil { +// GetValuesEditableBy returns the ValuesEditableBy field if it's non-nil, zero value otherwise. +func (c *CustomProperty) GetValuesEditableBy() string { + if c == nil || c.ValuesEditableBy == nil { return "" } - return *c.Value + return *c.ValuesEditableBy } // GetBaseRole returns the BaseRole field if it's non-nil, zero value otherwise. 
@@ -4734,6 +4862,14 @@ func (c *CustomRepoRoles) GetBaseRole() string { return *c.BaseRole } +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (c *CustomRepoRoles) GetCreatedAt() Timestamp { + if c == nil || c.CreatedAt == nil { + return Timestamp{} + } + return *c.CreatedAt +} + // GetDescription returns the Description field if it's non-nil, zero value otherwise. func (c *CustomRepoRoles) GetDescription() string { if c == nil || c.Description == nil { @@ -4758,6 +4894,22 @@ func (c *CustomRepoRoles) GetName() string { return *c.Name } +// GetOrg returns the Org field. +func (c *CustomRepoRoles) GetOrg() *Organization { + if c == nil { + return nil + } + return c.Org +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (c *CustomRepoRoles) GetUpdatedAt() Timestamp { + if c == nil || c.UpdatedAt == nil { + return Timestamp{} + } + return *c.UpdatedAt +} + // GetQuerySuite returns the QuerySuite field if it's non-nil, zero value otherwise. func (d *DefaultSetupConfiguration) GetQuerySuite() string { if d == nil || d.QuerySuite == nil { @@ -5766,6 +5918,110 @@ func (d *DeploymentRequest) GetTransientEnvironment() bool { return *d.TransientEnvironment } +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (d *DeploymentReviewEvent) GetAction() string { + if d == nil || d.Action == nil { + return "" + } + return *d.Action +} + +// GetApprover returns the Approver field. +func (d *DeploymentReviewEvent) GetApprover() *User { + if d == nil { + return nil + } + return d.Approver +} + +// GetComment returns the Comment field if it's non-nil, zero value otherwise. +func (d *DeploymentReviewEvent) GetComment() string { + if d == nil || d.Comment == nil { + return "" + } + return *d.Comment +} + +// GetEnterprise returns the Enterprise field. 
+func (d *DeploymentReviewEvent) GetEnterprise() *Enterprise { + if d == nil { + return nil + } + return d.Enterprise +} + +// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. +func (d *DeploymentReviewEvent) GetEnvironment() string { + if d == nil || d.Environment == nil { + return "" + } + return *d.Environment +} + +// GetInstallation returns the Installation field. +func (d *DeploymentReviewEvent) GetInstallation() *Installation { + if d == nil { + return nil + } + return d.Installation +} + +// GetOrganization returns the Organization field. +func (d *DeploymentReviewEvent) GetOrganization() *Organization { + if d == nil { + return nil + } + return d.Organization +} + +// GetRepo returns the Repo field. +func (d *DeploymentReviewEvent) GetRepo() *Repository { + if d == nil { + return nil + } + return d.Repo +} + +// GetRequester returns the Requester field. +func (d *DeploymentReviewEvent) GetRequester() *User { + if d == nil { + return nil + } + return d.Requester +} + +// GetSender returns the Sender field. +func (d *DeploymentReviewEvent) GetSender() *User { + if d == nil { + return nil + } + return d.Sender +} + +// GetSince returns the Since field if it's non-nil, zero value otherwise. +func (d *DeploymentReviewEvent) GetSince() string { + if d == nil || d.Since == nil { + return "" + } + return *d.Since +} + +// GetWorkflowJobRun returns the WorkflowJobRun field. +func (d *DeploymentReviewEvent) GetWorkflowJobRun() *WorkflowJobRun { + if d == nil { + return nil + } + return d.WorkflowJobRun +} + +// GetWorkflowRun returns the WorkflowRun field. +func (d *DeploymentReviewEvent) GetWorkflowRun() *WorkflowRun { + if d == nil { + return nil + } + return d.WorkflowRun +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. 
func (d *DeploymentStatus) GetCreatedAt() Timestamp { if d == nil || d.CreatedAt == nil { @@ -8750,6 +9006,14 @@ func (i *InstallationPermissions) GetActions() string { return *i.Actions } +// GetActionsVariables returns the ActionsVariables field if it's non-nil, zero value otherwise. +func (i *InstallationPermissions) GetActionsVariables() string { + if i == nil || i.ActionsVariables == nil { + return "" + } + return *i.ActionsVariables +} + // GetAdministration returns the Administration field if it's non-nil, zero value otherwise. func (i *InstallationPermissions) GetAdministration() string { if i == nil || i.Administration == nil { @@ -8854,6 +9118,14 @@ func (i *InstallationPermissions) GetOrganizationAdministration() string { return *i.OrganizationAdministration } +// GetOrganizationCustomOrgRoles returns the OrganizationCustomOrgRoles field if it's non-nil, zero value otherwise. +func (i *InstallationPermissions) GetOrganizationCustomOrgRoles() string { + if i == nil || i.OrganizationCustomOrgRoles == nil { + return "" + } + return *i.OrganizationCustomOrgRoles +} + // GetOrganizationCustomProperties returns the OrganizationCustomProperties field if it's non-nil, zero value otherwise. func (i *InstallationPermissions) GetOrganizationCustomProperties() string { if i == nil || i.OrganizationCustomProperties == nil { @@ -8886,6 +9158,22 @@ func (i *InstallationPermissions) GetOrganizationPackages() string { return *i.OrganizationPackages } +// GetOrganizationPersonalAccessTokenRequests returns the OrganizationPersonalAccessTokenRequests field if it's non-nil, zero value otherwise. +func (i *InstallationPermissions) GetOrganizationPersonalAccessTokenRequests() string { + if i == nil || i.OrganizationPersonalAccessTokenRequests == nil { + return "" + } + return *i.OrganizationPersonalAccessTokenRequests +} + +// GetOrganizationPersonalAccessTokens returns the OrganizationPersonalAccessTokens field if it's non-nil, zero value otherwise. 
+func (i *InstallationPermissions) GetOrganizationPersonalAccessTokens() string { + if i == nil || i.OrganizationPersonalAccessTokens == nil { + return "" + } + return *i.OrganizationPersonalAccessTokens +} + // GetOrganizationPlan returns the OrganizationPlan field if it's non-nil, zero value otherwise. func (i *InstallationPermissions) GetOrganizationPlan() string { if i == nil || i.OrganizationPlan == nil { @@ -12838,6 +13126,14 @@ func (o *OrganizationCustomRepoRoles) GetTotalCount() int { return *o.TotalCount } +// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise. +func (o *OrganizationCustomRoles) GetTotalCount() int { + if o == nil || o.TotalCount == nil { + return 0 + } + return *o.TotalCount +} + // GetAction returns the Action field if it's non-nil, zero value otherwise. func (o *OrganizationEvent) GetAction() string { if o == nil || o.Action == nil { @@ -14230,6 +14526,150 @@ func (p *PagesUpdate) GetSource() *PagesSource { return p.Source } +// GetCurrentUserCanApprove returns the CurrentUserCanApprove field if it's non-nil, zero value otherwise. +func (p *PendingDeployment) GetCurrentUserCanApprove() bool { + if p == nil || p.CurrentUserCanApprove == nil { + return false + } + return *p.CurrentUserCanApprove +} + +// GetEnvironment returns the Environment field. +func (p *PendingDeployment) GetEnvironment() *PendingDeploymentEnvironment { + if p == nil { + return nil + } + return p.Environment +} + +// GetWaitTimer returns the WaitTimer field if it's non-nil, zero value otherwise. +func (p *PendingDeployment) GetWaitTimer() int64 { + if p == nil || p.WaitTimer == nil { + return 0 + } + return *p.WaitTimer +} + +// GetWaitTimerStartedAt returns the WaitTimerStartedAt field if it's non-nil, zero value otherwise. 
+func (p *PendingDeployment) GetWaitTimerStartedAt() Timestamp { + if p == nil || p.WaitTimerStartedAt == nil { + return Timestamp{} + } + return *p.WaitTimerStartedAt +} + +// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. +func (p *PendingDeploymentEnvironment) GetHTMLURL() string { + if p == nil || p.HTMLURL == nil { + return "" + } + return *p.HTMLURL +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (p *PendingDeploymentEnvironment) GetID() int64 { + if p == nil || p.ID == nil { + return 0 + } + return *p.ID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (p *PendingDeploymentEnvironment) GetName() string { + if p == nil || p.Name == nil { + return "" + } + return *p.Name +} + +// GetNodeID returns the NodeID field if it's non-nil, zero value otherwise. +func (p *PendingDeploymentEnvironment) GetNodeID() string { + if p == nil || p.NodeID == nil { + return "" + } + return *p.NodeID +} + +// GetURL returns the URL field if it's non-nil, zero value otherwise. +func (p *PendingDeploymentEnvironment) GetURL() string { + if p == nil || p.URL == nil { + return "" + } + return *p.URL +} + +// GetAccessGrantedAt returns the AccessGrantedAt field if it's non-nil, zero value otherwise. +func (p *PersonalAccessToken) GetAccessGrantedAt() Timestamp { + if p == nil || p.AccessGrantedAt == nil { + return Timestamp{} + } + return *p.AccessGrantedAt +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. +func (p *PersonalAccessToken) GetID() int64 { + if p == nil || p.ID == nil { + return 0 + } + return *p.ID +} + +// GetOwner returns the Owner field. +func (p *PersonalAccessToken) GetOwner() *User { + if p == nil { + return nil + } + return p.Owner +} + +// GetPermissions returns the Permissions field. 
+func (p *PersonalAccessToken) GetPermissions() *PersonalAccessTokenPermissions { + if p == nil { + return nil + } + return p.Permissions +} + +// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise. +func (p *PersonalAccessToken) GetRepositoriesURL() string { + if p == nil || p.RepositoriesURL == nil { + return "" + } + return *p.RepositoriesURL +} + +// GetRepositorySelection returns the RepositorySelection field if it's non-nil, zero value otherwise. +func (p *PersonalAccessToken) GetRepositorySelection() string { + if p == nil || p.RepositorySelection == nil { + return "" + } + return *p.RepositorySelection +} + +// GetTokenExpired returns the TokenExpired field if it's non-nil, zero value otherwise. +func (p *PersonalAccessToken) GetTokenExpired() bool { + if p == nil || p.TokenExpired == nil { + return false + } + return *p.TokenExpired +} + +// GetTokenExpiresAt returns the TokenExpiresAt field if it's non-nil, zero value otherwise. +func (p *PersonalAccessToken) GetTokenExpiresAt() Timestamp { + if p == nil || p.TokenExpiresAt == nil { + return Timestamp{} + } + return *p.TokenExpiresAt +} + +// GetTokenLastUsedAt returns the TokenLastUsedAt field if it's non-nil, zero value otherwise. +func (p *PersonalAccessToken) GetTokenLastUsedAt() Timestamp { + if p == nil || p.TokenLastUsedAt == nil { + return Timestamp{} + } + return *p.TokenLastUsedAt +} + // GetOrg returns the Org map if it's non-nil, an empty map otherwise. func (p *PersonalAccessTokenPermissions) GetOrg() map[string]string { if p == nil || p.Org == nil { @@ -17526,14 +17966,6 @@ func (p *PushEventRepository) GetCreatedAt() Timestamp { return *p.CreatedAt } -// GetCustomProperties returns the CustomProperties map if it's non-nil, an empty map otherwise. 
-func (p *PushEventRepository) GetCustomProperties() map[string]string { - if p == nil || p.CustomProperties == nil { - return map[string]string{} - } - return p.CustomProperties -} - // GetDefaultBranch returns the DefaultBranch field if it's non-nil, zero value otherwise. func (p *PushEventRepository) GetDefaultBranch() string { if p == nil || p.DefaultBranch == nil { @@ -18678,14 +19110,6 @@ func (r *Repository) GetCreatedAt() Timestamp { return *r.CreatedAt } -// GetCustomProperties returns the CustomProperties map if it's non-nil, an empty map otherwise. -func (r *Repository) GetCustomProperties() map[string]string { - if r == nil || r.CustomProperties == nil { - return map[string]string{} - } - return r.CustomProperties -} - // GetDefaultBranch returns the DefaultBranch field if it's non-nil, zero value otherwise. func (r *Repository) GetDefaultBranch() string { if r == nil || r.DefaultBranch == nil { @@ -19998,6 +20422,14 @@ func (r *RepositoryPermissionLevel) GetPermission() string { return *r.Permission } +// GetRoleName returns the RoleName field if it's non-nil, zero value otherwise. +func (r *RepositoryPermissionLevel) GetRoleName() string { + if r == nil || r.RoleName == nil { + return "" + } + return *r.RoleName +} + // GetUser returns the User field. func (r *RepositoryPermissionLevel) GetUser() *User { if r == nil { @@ -20654,6 +21086,14 @@ func (r *Rule) GetSeverity() string { return *r.Severity } +// GetRestrictedFilePaths returns the RestrictedFilePaths field if it's non-nil, zero value otherwise. +func (r *RuleFileParameters) GetRestrictedFilePaths() []string { + if r == nil || r.RestrictedFilePaths == nil { + return nil + } + return *r.RestrictedFilePaths +} + // GetName returns the Name field if it's non-nil, zero value otherwise. 
func (r *RulePatternParameters) GetName() string { if r == nil || r.Name == nil { @@ -20774,6 +21214,14 @@ func (r *RulesetConditions) GetRepositoryName() *RulesetRepositoryNamesCondition return r.RepositoryName } +// GetRepositoryProperty returns the RepositoryProperty field. +func (r *RulesetConditions) GetRepositoryProperty() *RulesetRepositoryPropertyConditionParameters { + if r == nil { + return nil + } + return r.RepositoryProperty +} + // GetHRef returns the HRef field if it's non-nil, zero value otherwise. func (r *RulesetLink) GetHRef() string { if r == nil || r.HRef == nil { @@ -22158,6 +22606,86 @@ func (s *SourceImportAuthor) GetURL() string { return *s.URL } +// GetPrivacyLevel returns the PrivacyLevel field if it's non-nil, zero value otherwise. +func (s *SponsorshipChanges) GetPrivacyLevel() string { + if s == nil || s.PrivacyLevel == nil { + return "" + } + return *s.PrivacyLevel +} + +// GetTier returns the Tier field. +func (s *SponsorshipChanges) GetTier() *SponsorshipTier { + if s == nil { + return nil + } + return s.Tier +} + +// GetAction returns the Action field if it's non-nil, zero value otherwise. +func (s *SponsorshipEvent) GetAction() string { + if s == nil || s.Action == nil { + return "" + } + return *s.Action +} + +// GetChanges returns the Changes field. +func (s *SponsorshipEvent) GetChanges() *SponsorshipChanges { + if s == nil { + return nil + } + return s.Changes +} + +// GetEffectiveDate returns the EffectiveDate field if it's non-nil, zero value otherwise. +func (s *SponsorshipEvent) GetEffectiveDate() string { + if s == nil || s.EffectiveDate == nil { + return "" + } + return *s.EffectiveDate +} + +// GetInstallation returns the Installation field. +func (s *SponsorshipEvent) GetInstallation() *Installation { + if s == nil { + return nil + } + return s.Installation +} + +// GetOrganization returns the Organization field. 
+func (s *SponsorshipEvent) GetOrganization() *Organization { + if s == nil { + return nil + } + return s.Organization +} + +// GetRepository returns the Repository field. +func (s *SponsorshipEvent) GetRepository() *Repository { + if s == nil { + return nil + } + return s.Repository +} + +// GetSender returns the Sender field. +func (s *SponsorshipEvent) GetSender() *User { + if s == nil { + return nil + } + return s.Sender +} + +// GetFrom returns the From field if it's non-nil, zero value otherwise. +func (s *SponsorshipTier) GetFrom() string { + if s == nil || s.From == nil { + return "" + } + return *s.From +} + // GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. func (s *SSHSigningKey) GetCreatedAt() Timestamp { if s == nil || s.CreatedAt == nil { @@ -25086,6 +25614,70 @@ func (w *WorkflowJobEvent) GetWorkflowJob() *WorkflowJob { return w.WorkflowJob } +// GetConclusion returns the Conclusion field if it's non-nil, zero value otherwise. +func (w *WorkflowJobRun) GetConclusion() string { + if w == nil || w.Conclusion == nil { + return "" + } + return *w.Conclusion +} + +// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise. +func (w *WorkflowJobRun) GetCreatedAt() Timestamp { + if w == nil || w.CreatedAt == nil { + return Timestamp{} + } + return *w.CreatedAt +} + +// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise. +func (w *WorkflowJobRun) GetEnvironment() string { + if w == nil || w.Environment == nil { + return "" + } + return *w.Environment +} + +// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise. +func (w *WorkflowJobRun) GetHTMLURL() string { + if w == nil || w.HTMLURL == nil { + return "" + } + return *w.HTMLURL +} + +// GetID returns the ID field if it's non-nil, zero value otherwise. 
+func (w *WorkflowJobRun) GetID() int64 { + if w == nil || w.ID == nil { + return 0 + } + return *w.ID +} + +// GetName returns the Name field if it's non-nil, zero value otherwise. +func (w *WorkflowJobRun) GetName() string { + if w == nil || w.Name == nil { + return "" + } + return *w.Name +} + +// GetStatus returns the Status field if it's non-nil, zero value otherwise. +func (w *WorkflowJobRun) GetStatus() string { + if w == nil || w.Status == nil { + return "" + } + return *w.Status +} + +// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise. +func (w *WorkflowJobRun) GetUpdatedAt() Timestamp { + if w == nil || w.UpdatedAt == nil { + return Timestamp{} + } + return *w.UpdatedAt +} + // GetActor returns the Actor field. func (w *WorkflowRun) GetActor() *User { if w == nil { @@ -25246,6 +25838,14 @@ func (w *WorkflowRun) GetNodeID() string { return *w.NodeID } +// GetPath returns the Path field if it's non-nil, zero value otherwise. +func (w *WorkflowRun) GetPath() string { + if w == nil || w.Path == nil { + return "" + } + return *w.Path +} + // GetPreviousAttemptURL returns the PreviousAttemptURL field if it's non-nil, zero value otherwise. 
func (w *WorkflowRun) GetPreviousAttemptURL() string { if w == nil || w.PreviousAttemptURL == nil { diff --git a/vendor/github.com/google/go-github/v62/github/github.go b/vendor/github.com/google/go-github/v66/github/github.go similarity index 99% rename from vendor/github.com/google/go-github/v62/github/github.go rename to vendor/github.com/google/go-github/v66/github/github.go index 28a3c10bb..22fbb9747 100644 --- a/vendor/github.com/google/go-github/v62/github/github.go +++ b/vendor/github.com/google/go-github/v66/github/github.go @@ -28,7 +28,7 @@ import ( ) const ( - Version = "v62.0.0" + Version = "v66.0.0" defaultAPIVersion = "2022-11-28" defaultBaseURL = "https://api.github.com/" @@ -774,7 +774,7 @@ func parseSecondaryRate(r *http.Response) *time.Duration { // According to GitHub support, endpoints might return x-ratelimit-reset instead, // as an integer which represents the number of seconds since epoch UTC, - // represting the time to resume making requests. + // representing the time to resume making requests. if v := r.Header.Get(headerRateReset); v != "" { secondsSinceEpoch, _ := strconv.ParseInt(v, 10, 64) // Error handling is noop. 
retryAfter := time.Until(time.Unix(secondsSinceEpoch, 0)) diff --git a/vendor/github.com/google/go-github/v62/github/gitignore.go b/vendor/github.com/google/go-github/v66/github/gitignore.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/gitignore.go rename to vendor/github.com/google/go-github/v66/github/gitignore.go diff --git a/vendor/github.com/google/go-github/v62/github/interactions.go b/vendor/github.com/google/go-github/v66/github/interactions.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/interactions.go rename to vendor/github.com/google/go-github/v66/github/interactions.go diff --git a/vendor/github.com/google/go-github/v62/github/interactions_orgs.go b/vendor/github.com/google/go-github/v66/github/interactions_orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/interactions_orgs.go rename to vendor/github.com/google/go-github/v66/github/interactions_orgs.go diff --git a/vendor/github.com/google/go-github/v62/github/interactions_repos.go b/vendor/github.com/google/go-github/v66/github/interactions_repos.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/interactions_repos.go rename to vendor/github.com/google/go-github/v66/github/interactions_repos.go diff --git a/vendor/github.com/google/go-github/v62/github/issue_import.go b/vendor/github.com/google/go-github/v66/github/issue_import.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/issue_import.go rename to vendor/github.com/google/go-github/v66/github/issue_import.go diff --git a/vendor/github.com/google/go-github/v62/github/issues.go b/vendor/github.com/google/go-github/v66/github/issues.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/issues.go rename to vendor/github.com/google/go-github/v66/github/issues.go diff --git a/vendor/github.com/google/go-github/v62/github/issues_assignees.go 
b/vendor/github.com/google/go-github/v66/github/issues_assignees.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/issues_assignees.go rename to vendor/github.com/google/go-github/v66/github/issues_assignees.go diff --git a/vendor/github.com/google/go-github/v62/github/issues_comments.go b/vendor/github.com/google/go-github/v66/github/issues_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/issues_comments.go rename to vendor/github.com/google/go-github/v66/github/issues_comments.go diff --git a/vendor/github.com/google/go-github/v62/github/issues_events.go b/vendor/github.com/google/go-github/v66/github/issues_events.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/issues_events.go rename to vendor/github.com/google/go-github/v66/github/issues_events.go diff --git a/vendor/github.com/google/go-github/v62/github/issues_labels.go b/vendor/github.com/google/go-github/v66/github/issues_labels.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/issues_labels.go rename to vendor/github.com/google/go-github/v66/github/issues_labels.go diff --git a/vendor/github.com/google/go-github/v62/github/issues_milestones.go b/vendor/github.com/google/go-github/v66/github/issues_milestones.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/issues_milestones.go rename to vendor/github.com/google/go-github/v66/github/issues_milestones.go diff --git a/vendor/github.com/google/go-github/v62/github/issues_timeline.go b/vendor/github.com/google/go-github/v66/github/issues_timeline.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/issues_timeline.go rename to vendor/github.com/google/go-github/v66/github/issues_timeline.go diff --git a/vendor/github.com/google/go-github/v62/github/licenses.go b/vendor/github.com/google/go-github/v66/github/licenses.go similarity index 100% rename from 
vendor/github.com/google/go-github/v62/github/licenses.go rename to vendor/github.com/google/go-github/v66/github/licenses.go diff --git a/vendor/github.com/google/go-github/v62/github/markdown.go b/vendor/github.com/google/go-github/v66/github/markdown.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/markdown.go rename to vendor/github.com/google/go-github/v66/github/markdown.go diff --git a/vendor/github.com/google/go-github/v62/github/messages.go b/vendor/github.com/google/go-github/v66/github/messages.go similarity index 99% rename from vendor/github.com/google/go-github/v62/github/messages.go rename to vendor/github.com/google/go-github/v66/github/messages.go index 72edbd9fe..30c4fca93 100644 --- a/vendor/github.com/google/go-github/v62/github/messages.go +++ b/vendor/github.com/google/go-github/v66/github/messages.go @@ -57,6 +57,7 @@ var ( "dependabot_alert": &DependabotAlertEvent{}, "deploy_key": &DeployKeyEvent{}, "deployment": &DeploymentEvent{}, + "deployment_review": &DeploymentReviewEvent{}, "deployment_status": &DeploymentStatusEvent{}, "deployment_protection_rule": &DeploymentProtectionRuleEvent{}, "discussion": &DiscussionEvent{}, @@ -102,6 +103,7 @@ var ( "secret_scanning_alert": &SecretScanningAlertEvent{}, "security_advisory": &SecurityAdvisoryEvent{}, "security_and_analysis": &SecurityAndAnalysisEvent{}, + "sponsorship": &SponsorshipEvent{}, "star": &StarEvent{}, "status": &StatusEvent{}, "team": &TeamEvent{}, diff --git a/vendor/github.com/google/go-github/v62/github/meta.go b/vendor/github.com/google/go-github/v66/github/meta.go similarity index 83% rename from vendor/github.com/google/go-github/v62/github/meta.go rename to vendor/github.com/google/go-github/v66/github/meta.go index e0e355e8c..cc90b618b 100644 --- a/vendor/github.com/google/go-github/v62/github/meta.go +++ b/vendor/github.com/google/go-github/v66/github/meta.go @@ -70,9 +70,25 @@ type APIMeta struct { // which serve GitHub APIs. 
API []string `json:"api,omitempty"` - // A map of GitHub services and their associated domains. Note that many - // of these domains are represented as wildcards (e.g. "*.github.com"). - Domains map[string][]string `json:"domains,omitempty"` + // GitHub services and their associated domains. Note that many of these domains + // are represented as wildcards (e.g. "*.github.com"). + Domains *APIMetaDomains `json:"domains,omitempty"` +} + +// APIMetaDomains represents the domains associated with GitHub services. +type APIMetaDomains struct { + Website []string `json:"website,omitempty"` + Codespaces []string `json:"codespaces,omitempty"` + Copilot []string `json:"copilot,omitempty"` + Packages []string `json:"packages,omitempty"` + Actions []string `json:"actions,omitempty"` + ArtifactAttestations *APIMetaArtifactAttestations `json:"artifact_attestations,omitempty"` +} + +// APIMetaArtifactAttestations represents the artifact attestation services domains. +type APIMetaArtifactAttestations struct { + TrustDomain string `json:"trust_domain,omitempty"` + Services []string `json:"services,omitempty"` } // Get returns information about GitHub.com, the service. 
Or, if you access diff --git a/vendor/github.com/google/go-github/v62/github/migrations.go b/vendor/github.com/google/go-github/v66/github/migrations.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/migrations.go rename to vendor/github.com/google/go-github/v66/github/migrations.go diff --git a/vendor/github.com/google/go-github/v62/github/migrations_source_import.go b/vendor/github.com/google/go-github/v66/github/migrations_source_import.go similarity index 99% rename from vendor/github.com/google/go-github/v62/github/migrations_source_import.go rename to vendor/github.com/google/go-github/v66/github/migrations_source_import.go index 3b161232f..f484b77d4 100644 --- a/vendor/github.com/google/go-github/v62/github/migrations_source_import.go +++ b/vendor/github.com/google/go-github/v66/github/migrations_source_import.go @@ -76,7 +76,7 @@ type Import struct { // Contact GitHub support for more information. // detection_needs_auth - the importer requires authentication for // the originating repository to continue detection. Make an - // UpdatImport request, and include VCSUsername and + // UpdateImport request, and include VCSUsername and // VCSPassword. // detection_found_nothing - the importer didn't recognize any // source control at the URL. 
diff --git a/vendor/github.com/google/go-github/v62/github/migrations_user.go b/vendor/github.com/google/go-github/v66/github/migrations_user.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/migrations_user.go rename to vendor/github.com/google/go-github/v66/github/migrations_user.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs.go b/vendor/github.com/google/go-github/v66/github/orgs.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs.go rename to vendor/github.com/google/go-github/v66/github/orgs.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_actions_allowed.go b/vendor/github.com/google/go-github/v66/github/orgs_actions_allowed.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_actions_allowed.go rename to vendor/github.com/google/go-github/v66/github/orgs_actions_allowed.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_actions_permissions.go b/vendor/github.com/google/go-github/v66/github/orgs_actions_permissions.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_actions_permissions.go rename to vendor/github.com/google/go-github/v66/github/orgs_actions_permissions.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_audit_log.go b/vendor/github.com/google/go-github/v66/github/orgs_audit_log.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_audit_log.go rename to vendor/github.com/google/go-github/v66/github/orgs_audit_log.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_credential_authorizations.go b/vendor/github.com/google/go-github/v66/github/orgs_credential_authorizations.go similarity index 83% rename from vendor/github.com/google/go-github/v62/github/orgs_credential_authorizations.go rename to vendor/github.com/google/go-github/v66/github/orgs_credential_authorizations.go index 
eed0f0c66..dca42433c 100644 --- a/vendor/github.com/google/go-github/v62/github/orgs_credential_authorizations.go +++ b/vendor/github.com/google/go-github/v66/github/orgs_credential_authorizations.go @@ -55,13 +55,23 @@ type CredentialAuthorization struct { AuthorizedCredentialExpiresAt *Timestamp `json:"authorized_credential_expires_at,omitempty"` } +// CredentialAuthorizationsListOptions adds the Login option as supported by the +// list SAML SSO authorizations for organizations endpoint alongside paging options +// such as Page and PerPage. +// GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/orgs/orgs#list-saml-sso-authorizations-for-an-organization +type CredentialAuthorizationsListOptions struct { + ListOptions + // For credentials authorizations for an organization, limit the list of authorizations to a specific login (aka github username) + Login string `url:"login,omitempty"` +} + // ListCredentialAuthorizations lists credentials authorized through SAML SSO // for a given organization. Only available with GitHub Enterprise Cloud. 
// // GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/orgs/orgs#list-saml-sso-authorizations-for-an-organization // //meta:operation GET /orgs/{org}/credential-authorizations -func (s *OrganizationsService) ListCredentialAuthorizations(ctx context.Context, org string, opts *ListOptions) ([]*CredentialAuthorization, *Response, error) { +func (s *OrganizationsService) ListCredentialAuthorizations(ctx context.Context, org string, opts *CredentialAuthorizationsListOptions) ([]*CredentialAuthorization, *Response, error) { u := fmt.Sprintf("orgs/%v/credential-authorizations", org) u, err := addOptions(u, opts) if err != nil { diff --git a/vendor/github.com/google/go-github/v62/github/orgs_custom_roles.go b/vendor/github.com/google/go-github/v66/github/orgs_custom_repository_roles.go similarity index 61% rename from vendor/github.com/google/go-github/v62/github/orgs_custom_roles.go rename to vendor/github.com/google/go-github/v66/github/orgs_custom_repository_roles.go index e1deeb73e..2295e1b8a 100644 --- a/vendor/github.com/google/go-github/v62/github/orgs_custom_roles.go +++ b/vendor/github.com/google/go-github/v66/github/orgs_custom_repository_roles.go @@ -1,4 +1,4 @@ -// Copyright 2022 The go-github AUTHORS. All rights reserved. +// Copyright 2024 The go-github AUTHORS. All rights reserved. // // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -20,11 +20,22 @@ type OrganizationCustomRepoRoles struct { // See https://docs.github.com/enterprise-cloud@latest/organizations/managing-peoples-access-to-your-organization-with-roles/managing-custom-repository-roles-for-an-organization // for more information. 
type CustomRepoRoles struct { - ID *int64 `json:"id,omitempty"` + ID *int64 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + BaseRole *string `json:"base_role,omitempty"` + Permissions []string `json:"permissions,omitempty"` + Org *Organization `json:"organization,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` +} + +// CreateOrUpdateCustomRepoRoleOptions represents options required to create or update a custom repository role. +type CreateOrUpdateCustomRepoRoleOptions struct { Name *string `json:"name,omitempty"` Description *string `json:"description,omitempty"` BaseRole *string `json:"base_role,omitempty"` - Permissions []string `json:"permissions,omitempty"` + Permissions []string `json:"permissions"` } // ListCustomRepoRoles lists the custom repository roles available in this organization. @@ -50,21 +61,13 @@ func (s *OrganizationsService) ListCustomRepoRoles(ctx context.Context, org stri return customRepoRoles, resp, nil } -// CreateOrUpdateCustomRoleOptions represents options required to create or update a custom repository role. -type CreateOrUpdateCustomRoleOptions struct { - Name *string `json:"name,omitempty"` - Description *string `json:"description,omitempty"` - BaseRole *string `json:"base_role,omitempty"` - Permissions []string `json:"permissions,omitempty"` -} - // CreateCustomRepoRole creates a custom repository role in this organization. // In order to create custom repository roles in an organization, the authenticated user must be an organization owner. 
// // GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/orgs/custom-roles#create-a-custom-repository-role // //meta:operation POST /orgs/{org}/custom-repository-roles -func (s *OrganizationsService) CreateCustomRepoRole(ctx context.Context, org string, opts *CreateOrUpdateCustomRoleOptions) (*CustomRepoRoles, *Response, error) { +func (s *OrganizationsService) CreateCustomRepoRole(ctx context.Context, org string, opts *CreateOrUpdateCustomRepoRoleOptions) (*CustomRepoRoles, *Response, error) { u := fmt.Sprintf("orgs/%v/custom-repository-roles", org) req, err := s.client.NewRequest("POST", u, opts) @@ -87,7 +90,7 @@ func (s *OrganizationsService) CreateCustomRepoRole(ctx context.Context, org str // GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/orgs/custom-roles#update-a-custom-repository-role // //meta:operation PATCH /orgs/{org}/custom-repository-roles/{role_id} -func (s *OrganizationsService) UpdateCustomRepoRole(ctx context.Context, org, roleID string, opts *CreateOrUpdateCustomRoleOptions) (*CustomRepoRoles, *Response, error) { +func (s *OrganizationsService) UpdateCustomRepoRole(ctx context.Context, org string, roleID int64, opts *CreateOrUpdateCustomRepoRoleOptions) (*CustomRepoRoles, *Response, error) { u := fmt.Sprintf("orgs/%v/custom-repository-roles/%v", org, roleID) req, err := s.client.NewRequest("PATCH", u, opts) @@ -110,7 +113,7 @@ func (s *OrganizationsService) UpdateCustomRepoRole(ctx context.Context, org, ro // GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/orgs/custom-roles#delete-a-custom-repository-role // //meta:operation DELETE /orgs/{org}/custom-repository-roles/{role_id} -func (s *OrganizationsService) DeleteCustomRepoRole(ctx context.Context, org, roleID string) (*Response, error) { +func (s *OrganizationsService) DeleteCustomRepoRole(ctx context.Context, org string, roleID int64) (*Response, error) { u := fmt.Sprintf("orgs/%v/custom-repository-roles/%v", org, roleID) req, 
err := s.client.NewRequest("DELETE", u, nil) @@ -126,57 +129,3 @@ func (s *OrganizationsService) DeleteCustomRepoRole(ctx context.Context, org, ro return resp, nil } - -// ListTeamsAssignedToOrgRole returns all teams assigned to a specific organization role. -// In order to list teams assigned to an organization role, the authenticated user must be an organization owner. -// -// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#list-teams-that-are-assigned-to-an-organization-role -// -//meta:operation GET /orgs/{org}/organization-roles/{role_id}/teams -func (s *OrganizationsService) ListTeamsAssignedToOrgRole(ctx context.Context, org string, roleID int64, opts *ListOptions) ([]*Team, *Response, error) { - u := fmt.Sprintf("orgs/%v/organization-roles/%v/teams", org, roleID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var teams []*Team - resp, err := s.client.Do(ctx, req, &teams) - if err != nil { - return nil, resp, err - } - - return teams, resp, nil -} - -// ListUsersAssignedToOrgRole returns all users assigned to a specific organization role. -// In order to list users assigned to an organization role, the authenticated user must be an organization owner. 
-// -// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#list-users-that-are-assigned-to-an-organization-role -// -//meta:operation GET /orgs/{org}/organization-roles/{role_id}/users -func (s *OrganizationsService) ListUsersAssignedToOrgRole(ctx context.Context, org string, roleID int64, opts *ListOptions) ([]*User, *Response, error) { - u := fmt.Sprintf("orgs/%v/organization-roles/%v/users", org, roleID) - u, err := addOptions(u, opts) - if err != nil { - return nil, nil, err - } - - req, err := s.client.NewRequest("GET", u, nil) - if err != nil { - return nil, nil, err - } - - var users []*User - resp, err := s.client.Do(ctx, req, &users) - if err != nil { - return nil, resp, err - } - - return users, resp, nil -} diff --git a/vendor/github.com/google/go-github/v62/github/orgs_hooks.go b/vendor/github.com/google/go-github/v66/github/orgs_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_hooks.go rename to vendor/github.com/google/go-github/v66/github/orgs_hooks.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_hooks_configuration.go b/vendor/github.com/google/go-github/v66/github/orgs_hooks_configuration.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_hooks_configuration.go rename to vendor/github.com/google/go-github/v66/github/orgs_hooks_configuration.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_hooks_deliveries.go b/vendor/github.com/google/go-github/v66/github/orgs_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_hooks_deliveries.go rename to vendor/github.com/google/go-github/v66/github/orgs_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_members.go b/vendor/github.com/google/go-github/v66/github/orgs_members.go similarity index 95% rename from vendor/github.com/google/go-github/v62/github/orgs_members.go rename to 
vendor/github.com/google/go-github/v66/github/orgs_members.go index 5bc23657f..d818d7f9f 100644 --- a/vendor/github.com/google/go-github/v62/github/orgs_members.go +++ b/vendor/github.com/google/go-github/v66/github/orgs_members.go @@ -151,6 +151,20 @@ func (s *OrganizationsService) RemoveMember(ctx context.Context, org, user strin return s.client.Do(ctx, req, nil) } +// CancelInvite cancels an organization invitation. +// +// GitHub API docs: https://docs.github.com/rest/orgs/members#cancel-an-organization-invitation +// +//meta:operation DELETE /orgs/{org}/invitations/{invitation_id} +func (s *OrganizationsService) CancelInvite(ctx context.Context, org string, invitationID int64) (*Response, error) { + u := fmt.Sprintf("orgs/%v/invitations/%v", org, invitationID) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + return s.client.Do(ctx, req, nil) +} + // PublicizeMembership publicizes a user's membership in an organization. (A // user cannot publicize the membership for another user.) // @@ -395,7 +409,7 @@ func (s *OrganizationsService) ListOrgInvitationTeams(ctx context.Context, org, return orgInvitationTeams, resp, nil } -// ListFailedOrgInvitations returns a list of failed inviatations. +// ListFailedOrgInvitations returns a list of failed invitations. // // GitHub API docs: https://docs.github.com/rest/orgs/members#list-failed-organization-invitations // diff --git a/vendor/github.com/google/go-github/v66/github/orgs_organization_roles.go b/vendor/github.com/google/go-github/v66/github/orgs_organization_roles.go new file mode 100644 index 000000000..695c4dade --- /dev/null +++ b/vendor/github.com/google/go-github/v66/github/orgs_organization_roles.go @@ -0,0 +1,295 @@ +// Copyright 2022 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" +) + +// OrganizationCustomRoles represents custom organization roles available in specified organization. +type OrganizationCustomRoles struct { + TotalCount *int `json:"total_count,omitempty"` + CustomRepoRoles []*CustomOrgRoles `json:"roles,omitempty"` +} + +// CustomOrgRoles represents custom organization role available in specified organization. +type CustomOrgRoles struct { + ID *int64 `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Permissions []string `json:"permissions,omitempty"` + Org *Organization `json:"organization,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + Source *string `json:"source,omitempty"` + BaseRole *string `json:"base_role,omitempty"` +} + +// CreateOrUpdateOrgRoleOptions represents options required to create or update a custom organization role. +type CreateOrUpdateOrgRoleOptions struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Permissions []string `json:"permissions"` + BaseRole *string `json:"base_role,omitempty"` +} + +// ListRoles lists the custom roles available in this organization. +// In order to see custom roles in an organization, the authenticated user must be an organization owner. 
+// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#get-all-organization-roles-for-an-organization +// +//meta:operation GET /orgs/{org}/organization-roles +func (s *OrganizationsService) ListRoles(ctx context.Context, org string) (*OrganizationCustomRoles, *Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles", org) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + customRepoRoles := new(OrganizationCustomRoles) + resp, err := s.client.Do(ctx, req, customRepoRoles) + if err != nil { + return nil, resp, err + } + + return customRepoRoles, resp, nil +} + +// GetOrgRole gets an organization role in this organization. +// In order to get organization roles in an organization, the authenticated user must be an organization owner, or have access via an organization role. +// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#get-an-organization-role +// +//meta:operation GET /orgs/{org}/organization-roles/{role_id} +func (s *OrganizationsService) GetOrgRole(ctx context.Context, org string, roleID int64) (*CustomOrgRoles, *Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/%v", org, roleID) + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + resultingRole := new(CustomOrgRoles) + resp, err := s.client.Do(ctx, req, resultingRole) + if err != nil { + return nil, resp, err + } + + return resultingRole, resp, err +} + +// CreateCustomOrgRole creates a custom role in this organization. +// In order to create custom roles in an organization, the authenticated user must be an organization owner. 
+// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#create-a-custom-organization-role +// +//meta:operation POST /orgs/{org}/organization-roles +func (s *OrganizationsService) CreateCustomOrgRole(ctx context.Context, org string, opts *CreateOrUpdateOrgRoleOptions) (*CustomOrgRoles, *Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles", org) + + req, err := s.client.NewRequest("POST", u, opts) + if err != nil { + return nil, nil, err + } + + resultingRole := new(CustomOrgRoles) + resp, err := s.client.Do(ctx, req, resultingRole) + if err != nil { + return nil, resp, err + } + + return resultingRole, resp, err +} + +// UpdateCustomOrgRole updates a custom role in this organization. +// In order to update custom roles in an organization, the authenticated user must be an organization owner. +// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#update-a-custom-organization-role +// +//meta:operation PATCH /orgs/{org}/organization-roles/{role_id} +func (s *OrganizationsService) UpdateCustomOrgRole(ctx context.Context, org string, roleID int64, opts *CreateOrUpdateOrgRoleOptions) (*CustomOrgRoles, *Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/%v", org, roleID) + + req, err := s.client.NewRequest("PATCH", u, opts) + if err != nil { + return nil, nil, err + } + + resultingRole := new(CustomOrgRoles) + resp, err := s.client.Do(ctx, req, resultingRole) + if err != nil { + return nil, resp, err + } + + return resultingRole, resp, err +} + +// DeleteCustomOrgRole deletes an existing custom role in this organization. +// In order to delete custom roles in an organization, the authenticated user must be an organization owner. 
+// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#delete-a-custom-organization-role +// +//meta:operation DELETE /orgs/{org}/organization-roles/{role_id} +func (s *OrganizationsService) DeleteCustomOrgRole(ctx context.Context, org string, roleID int64) (*Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/%v", org, roleID) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + resultingRole := new(CustomOrgRoles) + resp, err := s.client.Do(ctx, req, resultingRole) + if err != nil { + return resp, err + } + + return resp, nil +} + +// AssignOrgRoleToTeam assigns an existing organization role to a team in this organization. +// In order to assign organization roles in an organization, the authenticated user must be an organization owner. +// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#assign-an-organization-role-to-a-team +// +//meta:operation PUT /orgs/{org}/organization-roles/teams/{team_slug}/{role_id} +func (s *OrganizationsService) AssignOrgRoleToTeam(ctx context.Context, org, teamSlug string, roleID int64) (*Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/teams/%v/%v", org, teamSlug, roleID) + + req, err := s.client.NewRequest("PUT", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// RemoveOrgRoleFromTeam removes an existing organization role assignment from a team in this organization. +// In order to remove organization role assignments in an organization, the authenticated user must be an organization owner. 
+// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#remove-an-organization-role-from-a-team +// +//meta:operation DELETE /orgs/{org}/organization-roles/teams/{team_slug}/{role_id} +func (s *OrganizationsService) RemoveOrgRoleFromTeam(ctx context.Context, org, teamSlug string, roleID int64) (*Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/teams/%v/%v", org, teamSlug, roleID) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// AssignOrgRoleToUser assigns an existing organization role to a user in this organization. +// In order to assign organization roles in an organization, the authenticated user must be an organization owner. +// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#assign-an-organization-role-to-a-user +// +//meta:operation PUT /orgs/{org}/organization-roles/users/{username}/{role_id} +func (s *OrganizationsService) AssignOrgRoleToUser(ctx context.Context, org, username string, roleID int64) (*Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/users/%v/%v", org, username, roleID) + + req, err := s.client.NewRequest("PUT", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// RemoveOrgRoleFromUser removes an existing organization role assignment from a user in this organization. +// In order to remove organization role assignments in an organization, the authenticated user must be an organization owner. 
+// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#remove-an-organization-role-from-a-user +// +//meta:operation DELETE /orgs/{org}/organization-roles/users/{username}/{role_id} +func (s *OrganizationsService) RemoveOrgRoleFromUser(ctx context.Context, org, username string, roleID int64) (*Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/users/%v/%v", org, username, roleID) + + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(ctx, req, nil) + if err != nil { + return resp, err + } + + return resp, nil +} + +// ListTeamsAssignedToOrgRole returns all teams assigned to a specific organization role. +// In order to list teams assigned to an organization role, the authenticated user must be an organization owner. +// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#list-teams-that-are-assigned-to-an-organization-role +// +//meta:operation GET /orgs/{org}/organization-roles/{role_id}/teams +func (s *OrganizationsService) ListTeamsAssignedToOrgRole(ctx context.Context, org string, roleID int64, opts *ListOptions) ([]*Team, *Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/%v/teams", org, roleID) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var teams []*Team + resp, err := s.client.Do(ctx, req, &teams) + if err != nil { + return nil, resp, err + } + + return teams, resp, nil +} + +// ListUsersAssignedToOrgRole returns all users assigned to a specific organization role. +// In order to list users assigned to an organization role, the authenticated user must be an organization owner. 
+// +// GitHub API docs: https://docs.github.com/rest/orgs/organization-roles#list-users-that-are-assigned-to-an-organization-role +// +//meta:operation GET /orgs/{org}/organization-roles/{role_id}/users +func (s *OrganizationsService) ListUsersAssignedToOrgRole(ctx context.Context, org string, roleID int64, opts *ListOptions) ([]*User, *Response, error) { + u := fmt.Sprintf("orgs/%v/organization-roles/%v/users", org, roleID) + u, err := addOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var users []*User + resp, err := s.client.Do(ctx, req, &users) + if err != nil { + return nil, resp, err + } + + return users, resp, nil +} diff --git a/vendor/github.com/google/go-github/v62/github/orgs_outside_collaborators.go b/vendor/github.com/google/go-github/v66/github/orgs_outside_collaborators.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_outside_collaborators.go rename to vendor/github.com/google/go-github/v66/github/orgs_outside_collaborators.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_packages.go b/vendor/github.com/google/go-github/v66/github/orgs_packages.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_packages.go rename to vendor/github.com/google/go-github/v66/github/orgs_packages.go diff --git a/vendor/github.com/google/go-github/v66/github/orgs_personal_access_tokens.go b/vendor/github.com/google/go-github/v66/github/orgs_personal_access_tokens.go new file mode 100644 index 000000000..af083744e --- /dev/null +++ b/vendor/github.com/google/go-github/v66/github/orgs_personal_access_tokens.go @@ -0,0 +1,167 @@ +// Copyright 2023 The go-github AUTHORS. All rights reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package github + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strings" +) + +// PersonalAccessToken represents the minimal representation of an organization programmatic access grant. +// +// GitHub API docs: https://docs.github.com/en/rest/orgs/personal-access-tokens?apiVersion=2022-11-28 +type PersonalAccessToken struct { + // "Unique identifier of the fine-grained personal access token. + // The `pat_id` used to get details about an approved fine-grained personal access token. + ID *int64 `json:"id"` + + // Owner is the GitHub user associated with the token. + Owner *User `json:"owner"` + + // RepositorySelection is the type of repository selection requested. + // Possible values are: "none", "all", "subset". + RepositorySelection *string `json:"repository_selection"` + + // URL to the list of repositories the fine-grained personal access token can access. + // Only follow when `repository_selection` is `subset`. + RepositoriesURL *string `json:"repositories_url"` + + // Permissions are the permissions requested, categorized by type. + Permissions *PersonalAccessTokenPermissions `json:"permissions"` + + // Date and time when the fine-grained personal access token was approved to access the organization. + AccessGrantedAt *Timestamp `json:"access_granted_at"` + + // Whether the associated fine-grained personal access token has expired. + TokenExpired *bool `json:"token_expired"` + + // Date and time when the associated fine-grained personal access token expires. + TokenExpiresAt *Timestamp `json:"token_expires_at"` + + // Date and time when the associated fine-grained personal access token was last used for authentication. + TokenLastUsedAt *Timestamp `json:"token_last_used_at"` +} + +// ListFineGrainedPATOptions specifies optional parameters to ListFineGrainedPersonalAccessTokens. +type ListFineGrainedPATOptions struct { + // The property by which to sort the results. 
+ // Default: created_at + // Value: created_at + Sort string `url:"sort,omitempty"` + + // The direction to sort the results by. + // Default: desc + // Value: asc, desc + Direction string `url:"direction,omitempty"` + + // A list of owner usernames to use to filter the results. + Owner []string `url:"-"` + + // The name of the repository to use to filter the results. + Repository string `url:"repository,omitempty"` + + // The permission to use to filter the results. + Permission string `url:"permission,omitempty"` + + // Only show fine-grained personal access tokens used before the given time. + // This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ. + LastUsedBefore string `url:"last_used_before,omitempty"` + + // Only show fine-grained personal access tokens used after the given time. + // This is a timestamp in ISO 8601 format: YYYY-MM-DDTHH:MM:SSZ. + LastUsedAfter string `url:"last_used_after,omitempty"` + + ListOptions +} + +// ListFineGrainedPersonalAccessTokens lists approved fine-grained personal access tokens owned by organization members that can access organization resources. +// Only GitHub Apps can call this API, using the `Personal access tokens` organization permissions (read). +// +// GitHub API docs: https://docs.github.com/rest/orgs/personal-access-tokens#list-fine-grained-personal-access-tokens-with-access-to-organization-resources +// +//meta:operation GET /orgs/{org}/personal-access-tokens +func (s *OrganizationsService) ListFineGrainedPersonalAccessTokens(ctx context.Context, org string, opts *ListFineGrainedPATOptions) ([]*PersonalAccessToken, *Response, error) { + u := fmt.Sprintf("orgs/%v/personal-access-tokens", org) + // The `owner` parameter is a special case that uses the `owner[]=...` format and needs a custom function to format it correctly. 
+ u, err := addListFineGrainedPATOptions(u, opts) + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest(http.MethodGet, u, opts) + if err != nil { + return nil, nil, err + } + + var pats []*PersonalAccessToken + + resp, err := s.client.Do(ctx, req, &pats) + if err != nil { + return nil, resp, err + } + + return pats, resp, nil +} + +// ReviewPersonalAccessTokenRequestOptions specifies the parameters to the ReviewPersonalAccessTokenRequest method. +type ReviewPersonalAccessTokenRequestOptions struct { + Action string `json:"action"` + Reason *string `json:"reason,omitempty"` +} + +// ReviewPersonalAccessTokenRequest approves or denies a pending request to access organization resources via a fine-grained personal access token. +// Only GitHub Apps can call this API, using the `organization_personal_access_token_requests: write` permission. +// `action` can be one of `approve` or `deny`. +// +// GitHub API docs: https://docs.github.com/rest/orgs/personal-access-tokens#review-a-request-to-access-organization-resources-with-a-fine-grained-personal-access-token +// +//meta:operation POST /orgs/{org}/personal-access-token-requests/{pat_request_id} +func (s *OrganizationsService) ReviewPersonalAccessTokenRequest(ctx context.Context, org string, requestID int64, opts ReviewPersonalAccessTokenRequestOptions) (*Response, error) { + u := fmt.Sprintf("orgs/%v/personal-access-token-requests/%v", org, requestID) + + req, err := s.client.NewRequest(http.MethodPost, u, &opts) + if err != nil { + return nil, err + } + + return s.client.Do(ctx, req, nil) +} + +// addListFineGrainedPATOptions adds the owner parameter to the URL query string with the correct format if it is set. +// +// GitHub API expects the owner parameter to be a list of strings in the `owner[]=...` format. +// For multiple owner values, the owner parameter is repeated in the query string. 
+// +// Example: +// owner[]=user1&owner[]=user2 +// This will filter the results to only include fine-grained personal access tokens owned by `user1` and `user2`. +// +// This function ensures the owner parameter is formatted correctly in the URL query string. +func addListFineGrainedPATOptions(s string, opts *ListFineGrainedPATOptions) (string, error) { + u, err := addOptions(s, opts) + if err != nil { + return s, err + } + + if len(opts.Owner) > 0 { + ownerVals := make([]string, len(opts.Owner)) + for i, owner := range opts.Owner { + ownerVals[i] = fmt.Sprintf("owner[]=%s", url.QueryEscape(owner)) + } + ownerQuery := strings.Join(ownerVals, "&") + + if strings.Contains(u, "?") { + u += "&" + ownerQuery + } else { + u += "?" + ownerQuery + } + } + + return u, nil +} diff --git a/vendor/github.com/google/go-github/v62/github/orgs_projects.go b/vendor/github.com/google/go-github/v66/github/orgs_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_projects.go rename to vendor/github.com/google/go-github/v66/github/orgs_projects.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_properties.go b/vendor/github.com/google/go-github/v66/github/orgs_properties.go similarity index 80% rename from vendor/github.com/google/go-github/v62/github/orgs_properties.go rename to vendor/github.com/google/go-github/v66/github/orgs_properties.go index 2e88b7f4f..3387d98d7 100644 --- a/vendor/github.com/google/go-github/v62/github/orgs_properties.go +++ b/vendor/github.com/google/go-github/v66/github/orgs_properties.go @@ -7,6 +7,7 @@ package github import ( "context" + "encoding/json" "fmt" ) @@ -15,12 +16,19 @@ type CustomProperty struct { // PropertyName is required for most endpoints except when calling CreateOrUpdateCustomProperty; // where this is sent in the path and thus can be omitted. 
PropertyName *string `json:"property_name,omitempty"` - // Possible values for ValueType are: string, single_select - ValueType string `json:"value_type"` - Required *bool `json:"required,omitempty"` - DefaultValue *string `json:"default_value,omitempty"` - Description *string `json:"description,omitempty"` + // The type of the value for the property. Can be one of: string, single_select. + ValueType string `json:"value_type"` + // Whether the property is required. + Required *bool `json:"required,omitempty"` + // Default value of the property. + DefaultValue *string `json:"default_value,omitempty"` + // Short description of the property. + Description *string `json:"description,omitempty"` + // An ordered list of the allowed values of the property. The property can have up to 200 + // allowed values. AllowedValues []string `json:"allowed_values,omitempty"` + // Who can edit the values of the property. Can be one of: org_actors, org_and_repo_actors, nil (null). + ValuesEditableBy *string `json:"values_editable_by,omitempty"` } // RepoCustomPropertyValue represents a repository custom property value. @@ -33,8 +41,42 @@ type RepoCustomPropertyValue struct { // CustomPropertyValue represents a custom property value. type CustomPropertyValue struct { - PropertyName string `json:"property_name"` - Value *string `json:"value,omitempty"` + PropertyName string `json:"property_name"` + Value interface{} `json:"value"` +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +// This helps us handle the fact that Value can be either a string, []string, or nil. 
+func (cpv *CustomPropertyValue) UnmarshalJSON(data []byte) error { + type aliasCustomPropertyValue CustomPropertyValue + aux := &struct { + *aliasCustomPropertyValue + }{ + aliasCustomPropertyValue: (*aliasCustomPropertyValue)(cpv), + } + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + switch v := aux.Value.(type) { + case nil: + cpv.Value = nil + case string: + cpv.Value = v + case []interface{}: + strSlice := make([]string, len(v)) + for i, item := range v { + if str, ok := item.(string); ok { + strSlice[i] = str + } else { + return fmt.Errorf("non-string value in string array") + } + } + cpv.Value = strSlice + default: + return fmt.Errorf("unexpected value type: %T", v) + } + return nil } // GetAllCustomProperties gets all custom properties that are defined for the specified organization. diff --git a/vendor/github.com/google/go-github/v62/github/orgs_rules.go b/vendor/github.com/google/go-github/v66/github/orgs_rules.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_rules.go rename to vendor/github.com/google/go-github/v66/github/orgs_rules.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_security_managers.go b/vendor/github.com/google/go-github/v66/github/orgs_security_managers.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_security_managers.go rename to vendor/github.com/google/go-github/v66/github/orgs_security_managers.go diff --git a/vendor/github.com/google/go-github/v62/github/orgs_users_blocking.go b/vendor/github.com/google/go-github/v66/github/orgs_users_blocking.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/orgs_users_blocking.go rename to vendor/github.com/google/go-github/v66/github/orgs_users_blocking.go diff --git a/vendor/github.com/google/go-github/v62/github/packages.go b/vendor/github.com/google/go-github/v66/github/packages.go similarity index 100% rename from 
vendor/github.com/google/go-github/v62/github/packages.go rename to vendor/github.com/google/go-github/v66/github/packages.go diff --git a/vendor/github.com/google/go-github/v62/github/projects.go b/vendor/github.com/google/go-github/v66/github/projects.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/projects.go rename to vendor/github.com/google/go-github/v66/github/projects.go diff --git a/vendor/github.com/google/go-github/v62/github/pulls.go b/vendor/github.com/google/go-github/v66/github/pulls.go similarity index 97% rename from vendor/github.com/google/go-github/v62/github/pulls.go rename to vendor/github.com/google/go-github/v66/github/pulls.go index 80df9fa68..b66850269 100644 --- a/vendor/github.com/google/go-github/v62/github/pulls.go +++ b/vendor/github.com/google/go-github/v66/github/pulls.go @@ -250,9 +250,16 @@ func (s *PullRequestsService) GetRaw(ctx context.Context, owner string, repo str // NewPullRequest represents a new pull request to be created. type NewPullRequest struct { - Title *string `json:"title,omitempty"` - Head *string `json:"head,omitempty"` - HeadRepo *string `json:"head_repo,omitempty"` + Title *string `json:"title,omitempty"` + // The name of the branch where your changes are implemented. For + // cross-repository pull requests in the same network, namespace head with + // a user like this: username:branch. + Head *string `json:"head,omitempty"` + HeadRepo *string `json:"head_repo,omitempty"` + // The name of the branch you want the changes pulled into. This should be + // an existing branch on the current repository. You cannot submit a pull + // request to one repository that requests a merge to a base of another + // repository. 
Base *string `json:"base,omitempty"` Body *string `json:"body,omitempty"` Issue *int `json:"issue,omitempty"` diff --git a/vendor/github.com/google/go-github/v62/github/pulls_comments.go b/vendor/github.com/google/go-github/v66/github/pulls_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/pulls_comments.go rename to vendor/github.com/google/go-github/v66/github/pulls_comments.go diff --git a/vendor/github.com/google/go-github/v62/github/pulls_reviewers.go b/vendor/github.com/google/go-github/v66/github/pulls_reviewers.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/pulls_reviewers.go rename to vendor/github.com/google/go-github/v66/github/pulls_reviewers.go diff --git a/vendor/github.com/google/go-github/v62/github/pulls_reviews.go b/vendor/github.com/google/go-github/v66/github/pulls_reviews.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/pulls_reviews.go rename to vendor/github.com/google/go-github/v66/github/pulls_reviews.go diff --git a/vendor/github.com/google/go-github/v62/github/pulls_threads.go b/vendor/github.com/google/go-github/v66/github/pulls_threads.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/pulls_threads.go rename to vendor/github.com/google/go-github/v66/github/pulls_threads.go diff --git a/vendor/github.com/google/go-github/v62/github/rate_limit.go b/vendor/github.com/google/go-github/v66/github/rate_limit.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/rate_limit.go rename to vendor/github.com/google/go-github/v66/github/rate_limit.go diff --git a/vendor/github.com/google/go-github/v62/github/reactions.go b/vendor/github.com/google/go-github/v66/github/reactions.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/reactions.go rename to vendor/github.com/google/go-github/v66/github/reactions.go diff --git 
a/vendor/github.com/google/go-github/v62/github/repos.go b/vendor/github.com/google/go-github/v66/github/repos.go similarity index 95% rename from vendor/github.com/google/go-github/v62/github/repos.go rename to vendor/github.com/google/go-github/v66/github/repos.go index 630ce748c..d928771df 100644 --- a/vendor/github.com/google/go-github/v62/github/repos.go +++ b/vendor/github.com/google/go-github/v66/github/repos.go @@ -27,59 +27,59 @@ type RepositoriesService service // Repository represents a GitHub repository. type Repository struct { - ID *int64 `json:"id,omitempty"` - NodeID *string `json:"node_id,omitempty"` - Owner *User `json:"owner,omitempty"` - Name *string `json:"name,omitempty"` - FullName *string `json:"full_name,omitempty"` - Description *string `json:"description,omitempty"` - Homepage *string `json:"homepage,omitempty"` - CodeOfConduct *CodeOfConduct `json:"code_of_conduct,omitempty"` - DefaultBranch *string `json:"default_branch,omitempty"` - MasterBranch *string `json:"master_branch,omitempty"` - CreatedAt *Timestamp `json:"created_at,omitempty"` - PushedAt *Timestamp `json:"pushed_at,omitempty"` - UpdatedAt *Timestamp `json:"updated_at,omitempty"` - HTMLURL *string `json:"html_url,omitempty"` - CloneURL *string `json:"clone_url,omitempty"` - GitURL *string `json:"git_url,omitempty"` - MirrorURL *string `json:"mirror_url,omitempty"` - SSHURL *string `json:"ssh_url,omitempty"` - SVNURL *string `json:"svn_url,omitempty"` - Language *string `json:"language,omitempty"` - Fork *bool `json:"fork,omitempty"` - ForksCount *int `json:"forks_count,omitempty"` - NetworkCount *int `json:"network_count,omitempty"` - OpenIssuesCount *int `json:"open_issues_count,omitempty"` - OpenIssues *int `json:"open_issues,omitempty"` // Deprecated: Replaced by OpenIssuesCount. For backward compatibility OpenIssues is still populated. 
- StargazersCount *int `json:"stargazers_count,omitempty"` - SubscribersCount *int `json:"subscribers_count,omitempty"` - WatchersCount *int `json:"watchers_count,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility WatchersCount is still populated. - Watchers *int `json:"watchers,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility Watchers is still populated. - Size *int `json:"size,omitempty"` - AutoInit *bool `json:"auto_init,omitempty"` - Parent *Repository `json:"parent,omitempty"` - Source *Repository `json:"source,omitempty"` - TemplateRepository *Repository `json:"template_repository,omitempty"` - Organization *Organization `json:"organization,omitempty"` - Permissions map[string]bool `json:"permissions,omitempty"` - AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` - AllowUpdateBranch *bool `json:"allow_update_branch,omitempty"` - AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` - AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` - AllowAutoMerge *bool `json:"allow_auto_merge,omitempty"` - AllowForking *bool `json:"allow_forking,omitempty"` - WebCommitSignoffRequired *bool `json:"web_commit_signoff_required,omitempty"` - DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` - UseSquashPRTitleAsDefault *bool `json:"use_squash_pr_title_as_default,omitempty"` - SquashMergeCommitTitle *string `json:"squash_merge_commit_title,omitempty"` // Can be one of: "PR_TITLE", "COMMIT_OR_PR_TITLE" - SquashMergeCommitMessage *string `json:"squash_merge_commit_message,omitempty"` // Can be one of: "PR_BODY", "COMMIT_MESSAGES", "BLANK" - MergeCommitTitle *string `json:"merge_commit_title,omitempty"` // Can be one of: "PR_TITLE", "MERGE_MESSAGE" - MergeCommitMessage *string `json:"merge_commit_message,omitempty"` // Can be one of: "PR_BODY", "PR_TITLE", "BLANK" - Topics []string `json:"topics,omitempty"` - CustomProperties map[string]string 
`json:"custom_properties,omitempty"` - Archived *bool `json:"archived,omitempty"` - Disabled *bool `json:"disabled,omitempty"` + ID *int64 `json:"id,omitempty"` + NodeID *string `json:"node_id,omitempty"` + Owner *User `json:"owner,omitempty"` + Name *string `json:"name,omitempty"` + FullName *string `json:"full_name,omitempty"` + Description *string `json:"description,omitempty"` + Homepage *string `json:"homepage,omitempty"` + CodeOfConduct *CodeOfConduct `json:"code_of_conduct,omitempty"` + DefaultBranch *string `json:"default_branch,omitempty"` + MasterBranch *string `json:"master_branch,omitempty"` + CreatedAt *Timestamp `json:"created_at,omitempty"` + PushedAt *Timestamp `json:"pushed_at,omitempty"` + UpdatedAt *Timestamp `json:"updated_at,omitempty"` + HTMLURL *string `json:"html_url,omitempty"` + CloneURL *string `json:"clone_url,omitempty"` + GitURL *string `json:"git_url,omitempty"` + MirrorURL *string `json:"mirror_url,omitempty"` + SSHURL *string `json:"ssh_url,omitempty"` + SVNURL *string `json:"svn_url,omitempty"` + Language *string `json:"language,omitempty"` + Fork *bool `json:"fork,omitempty"` + ForksCount *int `json:"forks_count,omitempty"` + NetworkCount *int `json:"network_count,omitempty"` + OpenIssuesCount *int `json:"open_issues_count,omitempty"` + OpenIssues *int `json:"open_issues,omitempty"` // Deprecated: Replaced by OpenIssuesCount. For backward compatibility OpenIssues is still populated. + StargazersCount *int `json:"stargazers_count,omitempty"` + SubscribersCount *int `json:"subscribers_count,omitempty"` + WatchersCount *int `json:"watchers_count,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility WatchersCount is still populated. + Watchers *int `json:"watchers,omitempty"` // Deprecated: Replaced by StargazersCount. For backward compatibility Watchers is still populated. 
+ Size *int `json:"size,omitempty"` + AutoInit *bool `json:"auto_init,omitempty"` + Parent *Repository `json:"parent,omitempty"` + Source *Repository `json:"source,omitempty"` + TemplateRepository *Repository `json:"template_repository,omitempty"` + Organization *Organization `json:"organization,omitempty"` + Permissions map[string]bool `json:"permissions,omitempty"` + AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"` + AllowUpdateBranch *bool `json:"allow_update_branch,omitempty"` + AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"` + AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"` + AllowAutoMerge *bool `json:"allow_auto_merge,omitempty"` + AllowForking *bool `json:"allow_forking,omitempty"` + WebCommitSignoffRequired *bool `json:"web_commit_signoff_required,omitempty"` + DeleteBranchOnMerge *bool `json:"delete_branch_on_merge,omitempty"` + UseSquashPRTitleAsDefault *bool `json:"use_squash_pr_title_as_default,omitempty"` + SquashMergeCommitTitle *string `json:"squash_merge_commit_title,omitempty"` // Can be one of: "PR_TITLE", "COMMIT_OR_PR_TITLE" + SquashMergeCommitMessage *string `json:"squash_merge_commit_message,omitempty"` // Can be one of: "PR_BODY", "COMMIT_MESSAGES", "BLANK" + MergeCommitTitle *string `json:"merge_commit_title,omitempty"` // Can be one of: "PR_TITLE", "MERGE_MESSAGE" + MergeCommitMessage *string `json:"merge_commit_message,omitempty"` // Can be one of: "PR_BODY", "PR_TITLE", "BLANK" + Topics []string `json:"topics,omitempty"` + CustomProperties map[string]interface{} `json:"custom_properties,omitempty"` + Archived *bool `json:"archived,omitempty"` + Disabled *bool `json:"disabled,omitempty"` // Only provided when using RepositoriesService.Get while in preview License *License `json:"license,omitempty"` @@ -1363,7 +1363,7 @@ type BypassPullRequestAllowancesRequest struct { // DismissalRestrictions specifies which users and teams can dismiss pull request reviews. 
type DismissalRestrictions struct { - // The list of users who can dimiss pull request reviews. + // The list of users who can dismiss pull request reviews. Users []*User `json:"users"` // The list of teams which can dismiss pull request reviews. Teams []*Team `json:"teams"` @@ -1372,7 +1372,7 @@ type DismissalRestrictions struct { } // DismissalRestrictionsRequest represents the request to create/edit the -// restriction to allows only specific users, teams or apps to dimiss pull request reviews. It is +// restriction to allows only specific users, teams or apps to dismiss pull request reviews. It is // separate from DismissalRestrictions above because the request structure is // different from the response structure. // Note: Both Users and Teams must be nil, or both must be non-nil. diff --git a/vendor/github.com/google/go-github/v62/github/repos_actions_access.go b/vendor/github.com/google/go-github/v66/github/repos_actions_access.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_actions_access.go rename to vendor/github.com/google/go-github/v66/github/repos_actions_access.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_actions_allowed.go b/vendor/github.com/google/go-github/v66/github/repos_actions_allowed.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_actions_allowed.go rename to vendor/github.com/google/go-github/v66/github/repos_actions_allowed.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_actions_permissions.go b/vendor/github.com/google/go-github/v66/github/repos_actions_permissions.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_actions_permissions.go rename to vendor/github.com/google/go-github/v66/github/repos_actions_permissions.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_autolinks.go b/vendor/github.com/google/go-github/v66/github/repos_autolinks.go similarity index 
100% rename from vendor/github.com/google/go-github/v62/github/repos_autolinks.go rename to vendor/github.com/google/go-github/v66/github/repos_autolinks.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_codeowners.go b/vendor/github.com/google/go-github/v66/github/repos_codeowners.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_codeowners.go rename to vendor/github.com/google/go-github/v66/github/repos_codeowners.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_collaborators.go b/vendor/github.com/google/go-github/v66/github/repos_collaborators.go similarity index 99% rename from vendor/github.com/google/go-github/v62/github/repos_collaborators.go rename to vendor/github.com/google/go-github/v66/github/repos_collaborators.go index 15a4e77a2..d6c985359 100644 --- a/vendor/github.com/google/go-github/v62/github/repos_collaborators.go +++ b/vendor/github.com/google/go-github/v66/github/repos_collaborators.go @@ -99,6 +99,8 @@ type RepositoryPermissionLevel struct { Permission *string `json:"permission,omitempty"` User *User `json:"user,omitempty"` + + RoleName *string `json:"role_name,omitempty"` } // GetPermissionLevel retrieves the specific permission level a collaborator has for a given repository. 
diff --git a/vendor/github.com/google/go-github/v62/github/repos_comments.go b/vendor/github.com/google/go-github/v66/github/repos_comments.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_comments.go rename to vendor/github.com/google/go-github/v66/github/repos_comments.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_commits.go b/vendor/github.com/google/go-github/v66/github/repos_commits.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_commits.go rename to vendor/github.com/google/go-github/v66/github/repos_commits.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_community_health.go b/vendor/github.com/google/go-github/v66/github/repos_community_health.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_community_health.go rename to vendor/github.com/google/go-github/v66/github/repos_community_health.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_contents.go b/vendor/github.com/google/go-github/v66/github/repos_contents.go similarity index 96% rename from vendor/github.com/google/go-github/v62/github/repos_contents.go rename to vendor/github.com/google/go-github/v66/github/repos_contents.go index 9539a5c42..97539aeeb 100644 --- a/vendor/github.com/google/go-github/v62/github/repos_contents.go +++ b/vendor/github.com/google/go-github/v66/github/repos_contents.go @@ -150,7 +150,11 @@ func (s *RepositoriesService) DownloadContents(ctx context.Context, owner, repo, return nil, resp, fmt.Errorf("no download link found for %s", filepath) } - dlResp, err := s.client.client.Get(*contents.DownloadURL) + dlReq, err := http.NewRequestWithContext(ctx, http.MethodGet, *contents.DownloadURL, nil) + if err != nil { + return nil, resp, err + } + dlResp, err := s.client.client.Do(dlReq) if err != nil { return nil, &Response{Response: dlResp}, err } @@ -188,7 +192,11 @@ func (s *RepositoriesService) 
DownloadContentsWithMeta(ctx context.Context, owne return nil, contents, resp, fmt.Errorf("no download link found for %s", filepath) } - dlResp, err := s.client.client.Get(*contents.DownloadURL) + dlReq, err := http.NewRequestWithContext(ctx, http.MethodGet, *contents.DownloadURL, nil) + if err != nil { + return nil, contents, resp, err + } + dlResp, err := s.client.client.Do(dlReq) if err != nil { return nil, contents, &Response{Response: dlResp}, err } @@ -346,7 +354,7 @@ func (s *RepositoriesService) GetArchiveLink(ctx context.Context, owner, repo st } defer resp.Body.Close() - if resp.StatusCode != http.StatusFound { + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusFound { return nil, newResponse(resp), fmt.Errorf("unexpected status code: %s", resp.Status) } diff --git a/vendor/github.com/google/go-github/v62/github/repos_deployment_branch_policies.go b/vendor/github.com/google/go-github/v66/github/repos_deployment_branch_policies.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_deployment_branch_policies.go rename to vendor/github.com/google/go-github/v66/github/repos_deployment_branch_policies.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_deployment_protection_rules.go b/vendor/github.com/google/go-github/v66/github/repos_deployment_protection_rules.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_deployment_protection_rules.go rename to vendor/github.com/google/go-github/v66/github/repos_deployment_protection_rules.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_deployments.go b/vendor/github.com/google/go-github/v66/github/repos_deployments.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_deployments.go rename to vendor/github.com/google/go-github/v66/github/repos_deployments.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_environments.go 
b/vendor/github.com/google/go-github/v66/github/repos_environments.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_environments.go rename to vendor/github.com/google/go-github/v66/github/repos_environments.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_forks.go b/vendor/github.com/google/go-github/v66/github/repos_forks.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_forks.go rename to vendor/github.com/google/go-github/v66/github/repos_forks.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_hooks.go b/vendor/github.com/google/go-github/v66/github/repos_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_hooks.go rename to vendor/github.com/google/go-github/v66/github/repos_hooks.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_hooks_configuration.go b/vendor/github.com/google/go-github/v66/github/repos_hooks_configuration.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_hooks_configuration.go rename to vendor/github.com/google/go-github/v66/github/repos_hooks_configuration.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_hooks_deliveries.go b/vendor/github.com/google/go-github/v66/github/repos_hooks_deliveries.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_hooks_deliveries.go rename to vendor/github.com/google/go-github/v66/github/repos_hooks_deliveries.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_invitations.go b/vendor/github.com/google/go-github/v66/github/repos_invitations.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_invitations.go rename to vendor/github.com/google/go-github/v66/github/repos_invitations.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_keys.go 
b/vendor/github.com/google/go-github/v66/github/repos_keys.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_keys.go rename to vendor/github.com/google/go-github/v66/github/repos_keys.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_lfs.go b/vendor/github.com/google/go-github/v66/github/repos_lfs.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_lfs.go rename to vendor/github.com/google/go-github/v66/github/repos_lfs.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_merging.go b/vendor/github.com/google/go-github/v66/github/repos_merging.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_merging.go rename to vendor/github.com/google/go-github/v66/github/repos_merging.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_pages.go b/vendor/github.com/google/go-github/v66/github/repos_pages.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_pages.go rename to vendor/github.com/google/go-github/v66/github/repos_pages.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_prereceive_hooks.go b/vendor/github.com/google/go-github/v66/github/repos_prereceive_hooks.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_prereceive_hooks.go rename to vendor/github.com/google/go-github/v66/github/repos_prereceive_hooks.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_projects.go b/vendor/github.com/google/go-github/v66/github/repos_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_projects.go rename to vendor/github.com/google/go-github/v66/github/repos_projects.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_properties.go b/vendor/github.com/google/go-github/v66/github/repos_properties.go similarity index 100% rename from 
vendor/github.com/google/go-github/v62/github/repos_properties.go rename to vendor/github.com/google/go-github/v66/github/repos_properties.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_releases.go b/vendor/github.com/google/go-github/v66/github/repos_releases.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_releases.go rename to vendor/github.com/google/go-github/v66/github/repos_releases.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_rules.go b/vendor/github.com/google/go-github/v66/github/repos_rules.go similarity index 68% rename from vendor/github.com/google/go-github/v62/github/repos_rules.go rename to vendor/github.com/google/go-github/v66/github/repos_rules.go index 6f046a356..d09bb71d1 100644 --- a/vendor/github.com/google/go-github/v62/github/repos_rules.go +++ b/vendor/github.com/google/go-github/v66/github/repos_rules.go @@ -48,12 +48,26 @@ type RulesetRepositoryIDsConditionParameters struct { RepositoryIDs []int64 `json:"repository_ids,omitempty"` } +// RulesetRepositoryPropertyTargetParameters represents a repository_property name and values to be used for targeting. +type RulesetRepositoryPropertyTargetParameters struct { + Name string `json:"name"` + Values []string `json:"property_values"` + Source string `json:"source"` +} + +// RulesetRepositoryPropertyConditionParameters represents the conditions object for repository_property. +type RulesetRepositoryPropertyConditionParameters struct { + Include []RulesetRepositoryPropertyTargetParameters `json:"include"` + Exclude []RulesetRepositoryPropertyTargetParameters `json:"exclude"` +} + // RulesetConditions represents the conditions object in a ruleset. -// Set either RepositoryName or RepositoryID, not both. +// Set either RepositoryName or RepositoryID or RepositoryProperty, not more than one. 
type RulesetConditions struct { - RefName *RulesetRefConditionParameters `json:"ref_name,omitempty"` - RepositoryName *RulesetRepositoryNamesConditionParameters `json:"repository_name,omitempty"` - RepositoryID *RulesetRepositoryIDsConditionParameters `json:"repository_id,omitempty"` + RefName *RulesetRefConditionParameters `json:"ref_name,omitempty"` + RepositoryName *RulesetRepositoryNamesConditionParameters `json:"repository_name,omitempty"` + RepositoryID *RulesetRepositoryIDsConditionParameters `json:"repository_id,omitempty"` + RepositoryProperty *RulesetRepositoryPropertyConditionParameters `json:"repository_property,omitempty"` } // RulePatternParameters represents the rule pattern parameters. @@ -66,6 +80,26 @@ type RulePatternParameters struct { Pattern string `json:"pattern"` } +// RuleFileParameters represents a list of file paths. +type RuleFileParameters struct { + RestrictedFilePaths *[]string `json:"restricted_file_paths"` +} + +// RuleMaxFilePathLengthParameters represents the max_file_path_length rule parameters. +type RuleMaxFilePathLengthParameters struct { + MaxFilePathLength int `json:"max_file_path_length"` +} + +// RuleFileExtensionRestrictionParameters represents the file_extension_restriction rule parameters. +type RuleFileExtensionRestrictionParameters struct { + RestrictedFileExtensions []string `json:"restricted_file_extensions"` +} + +// RuleMaxFileSizeParameters represents the max_file_size rule parameters. +type RuleMaxFileSizeParameters struct { + MaxFileSize int64 `json:"max_file_size"` +} + // UpdateAllowsFetchAndMergeRuleParameters represents the update rule parameters. type UpdateAllowsFetchAndMergeRuleParameters struct { UpdateAllowsFetchAndMerge bool `json:"update_allows_fetch_and_merge"` @@ -91,8 +125,22 @@ type RuleRequiredStatusChecks struct { IntegrationID *int64 `json:"integration_id,omitempty"` } +// MergeQueueRuleParameters represents the merge_queue rule parameters. 
+type MergeQueueRuleParameters struct { + CheckResponseTimeoutMinutes int `json:"check_response_timeout_minutes"` + // Possible values for GroupingStrategy are: ALLGREEN, HEADGREEN + GroupingStrategy string `json:"grouping_strategy"` + MaxEntriesToBuild int `json:"max_entries_to_build"` + MaxEntriesToMerge int `json:"max_entries_to_merge"` + // Possible values for MergeMethod are: MERGE, SQUASH, REBASE + MergeMethod string `json:"merge_method"` + MinEntriesToMerge int `json:"min_entries_to_merge"` + MinEntriesToMergeWaitMinutes int `json:"min_entries_to_merge_wait_minutes"` +} + // RequiredStatusChecksRuleParameters represents the required_status_checks rule parameters. type RequiredStatusChecksRuleParameters struct { + DoNotEnforceOnCreate bool `json:"do_not_enforce_on_create"` RequiredStatusChecks []RuleRequiredStatusChecks `json:"required_status_checks"` StrictRequiredStatusChecksPolicy bool `json:"strict_required_status_checks_policy"` } @@ -134,7 +182,7 @@ func (r *RepositoryRule) UnmarshalJSON(data []byte) error { r.Type = RepositoryRule.Type switch RepositoryRule.Type { - case "creation", "deletion", "merge_queue", "non_fast_forward", "required_linear_history", "required_signatures": + case "creation", "deletion", "non_fast_forward", "required_linear_history", "required_signatures": r.Parameters = nil case "update": if RepositoryRule.Parameters == nil { @@ -150,7 +198,20 @@ func (r *RepositoryRule) UnmarshalJSON(data []byte) error { rawParams := json.RawMessage(bytes) r.Parameters = &rawParams + case "merge_queue": + if RepositoryRule.Parameters == nil { + r.Parameters = nil + return nil + } + params := MergeQueueRuleParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams case "required_deployments": params := RequiredDeploymentEnvironmentsRuleParameters{} if err := json.Unmarshal(*RepositoryRule.Parameters, 
¶ms); err != nil { @@ -200,6 +261,42 @@ func (r *RepositoryRule) UnmarshalJSON(data []byte) error { bytes, _ := json.Marshal(params) rawParams := json.RawMessage(bytes) + r.Parameters = &rawParams + case "file_path_restriction": + params := RuleFileParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams + case "max_file_path_length": + params := RuleMaxFilePathLengthParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams + case "file_extension_restriction": + params := RuleFileExtensionRestrictionParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + + r.Parameters = &rawParams + case "max_file_size": + params := RuleMaxFileSizeParameters{} + if err := json.Unmarshal(*RepositoryRule.Parameters, ¶ms); err != nil { + return err + } + bytes, _ := json.Marshal(params) + rawParams := json.RawMessage(bytes) + r.Parameters = &rawParams default: r.Type = "" @@ -211,7 +308,17 @@ func (r *RepositoryRule) UnmarshalJSON(data []byte) error { } // NewMergeQueueRule creates a rule to only allow merges via a merge queue. 
-func NewMergeQueueRule() (rule *RepositoryRule) { +func NewMergeQueueRule(params *MergeQueueRuleParameters) (rule *RepositoryRule) { + if params != nil { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "merge_queue", + Parameters: &rawParams, + } + } return &RepositoryRule{ Type: "merge_queue", } @@ -377,11 +484,59 @@ func NewRequiredWorkflowsRule(params *RequiredWorkflowsRuleParameters) (rule *Re } } +// NewFilePathRestrictionRule creates a rule to restrict file paths from being pushed to. +func NewFilePathRestrictionRule(params *RuleFileParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "file_path_restriction", + Parameters: &rawParams, + } +} + +// NewMaxFilePathLengthRule creates a rule to restrict file paths longer than the limit from being pushed. +func NewMaxFilePathLengthRule(params *RuleMaxFilePathLengthParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "max_file_path_length", + Parameters: &rawParams, + } +} + +// NewFileExtensionRestrictionRule creates a rule to restrict file extensions from being pushed to a commit. +func NewFileExtensionRestrictionRule(params *RuleFileExtensionRestrictionParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "file_extension_restriction", + Parameters: &rawParams, + } +} + +// NewMaxFileSizeRule creates a rule to restrict file sizes from being pushed to a commit. +func NewMaxFileSizeRule(params *RuleMaxFileSizeParameters) (rule *RepositoryRule) { + bytes, _ := json.Marshal(params) + + rawParams := json.RawMessage(bytes) + + return &RepositoryRule{ + Type: "max_file_size", + Parameters: &rawParams, + } +} + // Ruleset represents a GitHub ruleset object. 
type Ruleset struct { ID *int64 `json:"id,omitempty"` Name string `json:"name"` - // Possible values for Target are branch, tag + // Possible values for Target are branch, tag, push Target *string `json:"target,omitempty"` // Possible values for SourceType are: Repository, Organization SourceType *string `json:"source_type,omitempty"` @@ -395,6 +550,24 @@ type Ruleset struct { Rules []*RepositoryRule `json:"rules,omitempty"` } +// rulesetNoOmitBypassActors represents a GitHub ruleset object. The struct does not omit bypassActors if the field is nil or an empty array is passed. +type rulesetNoOmitBypassActors struct { + ID *int64 `json:"id,omitempty"` + Name string `json:"name"` + // Possible values for Target are branch, tag + Target *string `json:"target,omitempty"` + // Possible values for SourceType are: Repository, Organization + SourceType *string `json:"source_type,omitempty"` + Source string `json:"source"` + // Possible values for Enforcement are: disabled, active, evaluate + Enforcement string `json:"enforcement"` + BypassActors []*BypassActor `json:"bypass_actors"` + NodeID *string `json:"node_id,omitempty"` + Links *RulesetLinks `json:"_links,omitempty"` + Conditions *RulesetConditions `json:"conditions,omitempty"` + Rules []*RepositoryRule `json:"rules,omitempty"` +} + // GetRulesForBranch gets all the rules that apply to the specified branch. // // GitHub API docs: https://docs.github.com/rest/repos/rules#get-rules-for-a-branch @@ -507,6 +680,48 @@ func (s *RepositoriesService) UpdateRuleset(ctx context.Context, owner, repo str return ruleset, resp, nil } +// UpdateRulesetNoBypassActor updates a ruleset for the specified repository. +// +// This function is necessary as the UpdateRuleset function does not marshal ByPassActor if passed as nil or an empty array. 
+// +// GitHub API docs: https://docs.github.com/rest/repos/rules#update-a-repository-ruleset +// +//meta:operation PUT /repos/{owner}/{repo}/rulesets/{ruleset_id} +func (s *RepositoriesService) UpdateRulesetNoBypassActor(ctx context.Context, owner, repo string, rulesetID int64, rs *Ruleset) (*Ruleset, *Response, error) { + u := fmt.Sprintf("repos/%v/%v/rulesets/%v", owner, repo, rulesetID) + + rsNoBypassActor := &rulesetNoOmitBypassActors{} + + if rs != nil { + rsNoBypassActor = &rulesetNoOmitBypassActors{ + ID: rs.ID, + Name: rs.Name, + Target: rs.Target, + SourceType: rs.SourceType, + Source: rs.Source, + Enforcement: rs.Enforcement, + BypassActors: rs.BypassActors, + NodeID: rs.NodeID, + Links: rs.Links, + Conditions: rs.Conditions, + Rules: rs.Rules, + } + } + + req, err := s.client.NewRequest("PUT", u, rsNoBypassActor) + if err != nil { + return nil, nil, err + } + + var ruleSet *Ruleset + resp, err := s.client.Do(ctx, req, &ruleSet) + if err != nil { + return nil, resp, err + } + + return ruleSet, resp, nil +} + // DeleteRuleset deletes a ruleset for the specified repository. 
// // GitHub API docs: https://docs.github.com/rest/repos/rules#delete-a-repository-ruleset diff --git a/vendor/github.com/google/go-github/v62/github/repos_stats.go b/vendor/github.com/google/go-github/v66/github/repos_stats.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_stats.go rename to vendor/github.com/google/go-github/v66/github/repos_stats.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_statuses.go b/vendor/github.com/google/go-github/v66/github/repos_statuses.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_statuses.go rename to vendor/github.com/google/go-github/v66/github/repos_statuses.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_tags.go b/vendor/github.com/google/go-github/v66/github/repos_tags.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_tags.go rename to vendor/github.com/google/go-github/v66/github/repos_tags.go diff --git a/vendor/github.com/google/go-github/v62/github/repos_traffic.go b/vendor/github.com/google/go-github/v66/github/repos_traffic.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/repos_traffic.go rename to vendor/github.com/google/go-github/v66/github/repos_traffic.go diff --git a/vendor/github.com/google/go-github/v62/github/scim.go b/vendor/github.com/google/go-github/v66/github/scim.go similarity index 96% rename from vendor/github.com/google/go-github/v62/github/scim.go rename to vendor/github.com/google/go-github/v66/github/scim.go index 02136d7ef..4b34c1663 100644 --- a/vendor/github.com/google/go-github/v62/github/scim.go +++ b/vendor/github.com/google/go-github/v66/github/scim.go @@ -110,19 +110,21 @@ func (s *SCIMService) ListSCIMProvisionedIdentities(ctx context.Context, org str // GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/scim/scim#provision-and-invite-a-scim-user // //meta:operation POST 
/scim/v2/organizations/{org}/Users -func (s *SCIMService) ProvisionAndInviteSCIMUser(ctx context.Context, org string, opts *SCIMUserAttributes) (*Response, error) { +func (s *SCIMService) ProvisionAndInviteSCIMUser(ctx context.Context, org string, opts *SCIMUserAttributes) (*SCIMUserAttributes, *Response, error) { u := fmt.Sprintf("scim/v2/organizations/%v/Users", org) - u, err := addOptions(u, opts) + + req, err := s.client.NewRequest("POST", u, opts) if err != nil { - return nil, err + return nil, nil, err } - req, err := s.client.NewRequest("POST", u, nil) + user := new(SCIMUserAttributes) + resp, err := s.client.Do(ctx, req, user) if err != nil { - return nil, err + return nil, resp, err } - return s.client.Do(ctx, req, nil) + return user, resp, nil } // GetSCIMProvisioningInfoForUser returns SCIM provisioning information for a user. diff --git a/vendor/github.com/google/go-github/v62/github/search.go b/vendor/github.com/google/go-github/v66/github/search.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/search.go rename to vendor/github.com/google/go-github/v66/github/search.go diff --git a/vendor/github.com/google/go-github/v62/github/secret_scanning.go b/vendor/github.com/google/go-github/v66/github/secret_scanning.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/secret_scanning.go rename to vendor/github.com/google/go-github/v66/github/secret_scanning.go diff --git a/vendor/github.com/google/go-github/v62/github/security_advisories.go b/vendor/github.com/google/go-github/v66/github/security_advisories.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/security_advisories.go rename to vendor/github.com/google/go-github/v66/github/security_advisories.go diff --git a/vendor/github.com/google/go-github/v62/github/strings.go b/vendor/github.com/google/go-github/v66/github/strings.go similarity index 85% rename from 
vendor/github.com/google/go-github/v62/github/strings.go rename to vendor/github.com/google/go-github/v66/github/strings.go index 147c515e2..f5e61aa32 100644 --- a/vendor/github.com/google/go-github/v62/github/strings.go +++ b/vendor/github.com/google/go-github/v66/github/strings.go @@ -27,7 +27,7 @@ func Stringify(message interface{}) string { func stringifyValue(w *bytes.Buffer, val reflect.Value) { if val.Kind() == reflect.Ptr && val.IsNil() { - w.Write([]byte("")) + w.WriteString("") return } @@ -37,20 +37,20 @@ func stringifyValue(w *bytes.Buffer, val reflect.Value) { case reflect.String: fmt.Fprintf(w, `"%s"`, v) case reflect.Slice: - w.Write([]byte{'['}) + w.WriteByte('[') for i := 0; i < v.Len(); i++ { if i > 0 { - w.Write([]byte{' '}) + w.WriteByte(' ') } stringifyValue(w, v.Index(i)) } - w.Write([]byte{']'}) + w.WriteByte(']') return case reflect.Struct: if v.Type().Name() != "" { - w.Write([]byte(v.Type().String())) + w.WriteString(v.Type().String()) } // special handling of Timestamp values @@ -59,7 +59,7 @@ func stringifyValue(w *bytes.Buffer, val reflect.Value) { return } - w.Write([]byte{'{'}) + w.WriteByte('{') var sep bool for i := 0; i < v.NumField(); i++ { @@ -75,17 +75,17 @@ func stringifyValue(w *bytes.Buffer, val reflect.Value) { } if sep { - w.Write([]byte(", ")) + w.WriteString(", ") } else { sep = true } - w.Write([]byte(v.Type().Field(i).Name)) - w.Write([]byte{':'}) + w.WriteString(v.Type().Field(i).Name) + w.WriteByte(':') stringifyValue(w, fv) } - w.Write([]byte{'}'}) + w.WriteByte('}') default: if v.CanInterface() { fmt.Fprint(w, v.Interface()) diff --git a/vendor/github.com/google/go-github/v62/github/teams.go b/vendor/github.com/google/go-github/v66/github/teams.go similarity index 97% rename from vendor/github.com/google/go-github/v62/github/teams.go rename to vendor/github.com/google/go-github/v66/github/teams.go index 0f6cc9d16..10dbebcbb 100644 --- a/vendor/github.com/google/go-github/v62/github/teams.go +++ 
b/vendor/github.com/google/go-github/v66/github/teams.go @@ -206,13 +206,14 @@ func (s *TeamsService) CreateTeam(ctx context.Context, org string, team NewTeam) // "parent_team_id" field will be null. It is for internal use // only and should not be exported. type newTeamNoParent struct { - Name string `json:"name"` - Description *string `json:"description,omitempty"` - Maintainers []string `json:"maintainers,omitempty"` - RepoNames []string `json:"repo_names,omitempty"` - ParentTeamID *int64 `json:"parent_team_id"` // This will be "null" - Privacy *string `json:"privacy,omitempty"` - LDAPDN *string `json:"ldap_dn,omitempty"` + Name string `json:"name"` + Description *string `json:"description,omitempty"` + Maintainers []string `json:"maintainers,omitempty"` + RepoNames []string `json:"repo_names,omitempty"` + ParentTeamID *int64 `json:"parent_team_id"` // This will be "null" + NotificationSetting *string `json:"notification_setting,omitempty"` + Privacy *string `json:"privacy,omitempty"` + LDAPDN *string `json:"ldap_dn,omitempty"` } // copyNewTeamWithoutParent is used to set the "parent_team_id" @@ -220,12 +221,13 @@ type newTeamNoParent struct { // It is for internal use only and should not be exported. func copyNewTeamWithoutParent(team *NewTeam) *newTeamNoParent { return &newTeamNoParent{ - Name: team.Name, - Description: team.Description, - Maintainers: team.Maintainers, - RepoNames: team.RepoNames, - Privacy: team.Privacy, - LDAPDN: team.LDAPDN, + Name: team.Name, + Description: team.Description, + Maintainers: team.Maintainers, + RepoNames: team.RepoNames, + NotificationSetting: team.NotificationSetting, + Privacy: team.Privacy, + LDAPDN: team.LDAPDN, } } @@ -796,6 +798,14 @@ func (s *TeamsService) RemoveTeamProjectBySlug(ctx context.Context, org, slug st return s.client.Do(ctx, req, nil) } +// ListIDPGroupsOptions specifies the optional parameters to the ListIDPGroupsInOrganization method. 
+type ListIDPGroupsOptions struct { + // Filters the results to return only those that begin with the value specified by this parameter. + Query string `url:"q,omitempty"` + + ListCursorOptions +} + // IDPGroupList represents a list of external identity provider (IDP) groups. type IDPGroupList struct { Groups []*IDPGroup `json:"groups"` @@ -813,7 +823,7 @@ type IDPGroup struct { // GitHub API docs: https://docs.github.com/enterprise-cloud@latest/rest/teams/team-sync#list-idp-groups-for-an-organization // //meta:operation GET /orgs/{org}/team-sync/groups -func (s *TeamsService) ListIDPGroupsInOrganization(ctx context.Context, org string, opts *ListCursorOptions) (*IDPGroupList, *Response, error) { +func (s *TeamsService) ListIDPGroupsInOrganization(ctx context.Context, org string, opts *ListIDPGroupsOptions) (*IDPGroupList, *Response, error) { u := fmt.Sprintf("orgs/%v/team-sync/groups", org) u, err := addOptions(u, opts) if err != nil { diff --git a/vendor/github.com/google/go-github/v62/github/teams_discussion_comments.go b/vendor/github.com/google/go-github/v66/github/teams_discussion_comments.go similarity index 98% rename from vendor/github.com/google/go-github/v62/github/teams_discussion_comments.go rename to vendor/github.com/google/go-github/v66/github/teams_discussion_comments.go index ad3818c13..eba6fdf46 100644 --- a/vendor/github.com/google/go-github/v62/github/teams_discussion_comments.go +++ b/vendor/github.com/google/go-github/v66/github/teams_discussion_comments.go @@ -10,7 +10,7 @@ import ( "fmt" ) -// DiscussionComment represents a GitHub dicussion in a team. +// DiscussionComment represents a GitHub discussion in a team. 
type DiscussionComment struct { Author *User `json:"author,omitempty"` Body *string `json:"body,omitempty"` @@ -145,8 +145,8 @@ func (s *TeamsService) GetCommentBySlug(ctx context.Context, org, slug string, d // GitHub API docs: https://docs.github.com/rest/teams/discussion-comments#create-a-discussion-comment // //meta:operation POST /orgs/{org}/teams/{team_slug}/discussions/{discussion_number}/comments -func (s *TeamsService) CreateCommentByID(ctx context.Context, orgID, teamID int64, discsusionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) { - u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments", orgID, teamID, discsusionNumber) +func (s *TeamsService) CreateCommentByID(ctx context.Context, orgID, teamID int64, discussionNumber int, comment DiscussionComment) (*DiscussionComment, *Response, error) { + u := fmt.Sprintf("organizations/%v/team/%v/discussions/%v/comments", orgID, teamID, discussionNumber) req, err := s.client.NewRequest("POST", u, comment) if err != nil { return nil, nil, err diff --git a/vendor/github.com/google/go-github/v62/github/teams_discussions.go b/vendor/github.com/google/go-github/v66/github/teams_discussions.go similarity index 99% rename from vendor/github.com/google/go-github/v62/github/teams_discussions.go rename to vendor/github.com/google/go-github/v66/github/teams_discussions.go index ee78c032a..b056525f4 100644 --- a/vendor/github.com/google/go-github/v62/github/teams_discussions.go +++ b/vendor/github.com/google/go-github/v66/github/teams_discussions.go @@ -10,7 +10,7 @@ import ( "fmt" ) -// TeamDiscussion represents a GitHub dicussion in a team. +// TeamDiscussion represents a GitHub discussion in a team. 
type TeamDiscussion struct { Author *User `json:"author,omitempty"` Body *string `json:"body,omitempty"` diff --git a/vendor/github.com/google/go-github/v62/github/teams_members.go b/vendor/github.com/google/go-github/v66/github/teams_members.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/teams_members.go rename to vendor/github.com/google/go-github/v66/github/teams_members.go diff --git a/vendor/github.com/google/go-github/v62/github/timestamp.go b/vendor/github.com/google/go-github/v66/github/timestamp.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/timestamp.go rename to vendor/github.com/google/go-github/v66/github/timestamp.go diff --git a/vendor/github.com/google/go-github/v62/github/users.go b/vendor/github.com/google/go-github/v66/github/users.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/users.go rename to vendor/github.com/google/go-github/v66/github/users.go diff --git a/vendor/github.com/google/go-github/v62/github/users_administration.go b/vendor/github.com/google/go-github/v66/github/users_administration.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/users_administration.go rename to vendor/github.com/google/go-github/v66/github/users_administration.go diff --git a/vendor/github.com/google/go-github/v62/github/users_blocking.go b/vendor/github.com/google/go-github/v66/github/users_blocking.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/users_blocking.go rename to vendor/github.com/google/go-github/v66/github/users_blocking.go diff --git a/vendor/github.com/google/go-github/v62/github/users_emails.go b/vendor/github.com/google/go-github/v66/github/users_emails.go similarity index 96% rename from vendor/github.com/google/go-github/v62/github/users_emails.go rename to vendor/github.com/google/go-github/v66/github/users_emails.go index 8386de250..189187a74 100644 --- 
a/vendor/github.com/google/go-github/v62/github/users_emails.go +++ b/vendor/github.com/google/go-github/v66/github/users_emails.go @@ -86,11 +86,11 @@ func (s *UsersService) DeleteEmails(ctx context.Context, emails []string) (*Resp func (s *UsersService) SetEmailVisibility(ctx context.Context, visibility string) ([]*UserEmail, *Response, error) { u := "user/email/visibility" - updateVisiblilityReq := &UserEmail{ + updateVisibilityReq := &UserEmail{ Visibility: &visibility, } - req, err := s.client.NewRequest("PATCH", u, updateVisiblilityReq) + req, err := s.client.NewRequest("PATCH", u, updateVisibilityReq) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/google/go-github/v62/github/users_followers.go b/vendor/github.com/google/go-github/v66/github/users_followers.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/users_followers.go rename to vendor/github.com/google/go-github/v66/github/users_followers.go diff --git a/vendor/github.com/google/go-github/v62/github/users_gpg_keys.go b/vendor/github.com/google/go-github/v66/github/users_gpg_keys.go similarity index 98% rename from vendor/github.com/google/go-github/v62/github/users_gpg_keys.go rename to vendor/github.com/google/go-github/v66/github/users_gpg_keys.go index de7caaf1b..2f296a1ef 100644 --- a/vendor/github.com/google/go-github/v62/github/users_gpg_keys.go +++ b/vendor/github.com/google/go-github/v66/github/users_gpg_keys.go @@ -97,7 +97,7 @@ func (s *UsersService) GetGPGKey(ctx context.Context, id int64) (*GPGKey, *Respo return key, resp, nil } -// CreateGPGKey creates a GPG key. It requires authenticatation via Basic Auth +// CreateGPGKey creates a GPG key. It requires authentication via Basic Auth // or OAuth with at least write:gpg_key scope. 
// // GitHub API docs: https://docs.github.com/rest/users/gpg-keys#create-a-gpg-key-for-the-authenticated-user diff --git a/vendor/github.com/google/go-github/v62/github/users_keys.go b/vendor/github.com/google/go-github/v66/github/users_keys.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/users_keys.go rename to vendor/github.com/google/go-github/v66/github/users_keys.go diff --git a/vendor/github.com/google/go-github/v62/github/users_packages.go b/vendor/github.com/google/go-github/v66/github/users_packages.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/users_packages.go rename to vendor/github.com/google/go-github/v66/github/users_packages.go diff --git a/vendor/github.com/google/go-github/v62/github/users_projects.go b/vendor/github.com/google/go-github/v66/github/users_projects.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/users_projects.go rename to vendor/github.com/google/go-github/v66/github/users_projects.go diff --git a/vendor/github.com/google/go-github/v62/github/users_ssh_signing_keys.go b/vendor/github.com/google/go-github/v66/github/users_ssh_signing_keys.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/users_ssh_signing_keys.go rename to vendor/github.com/google/go-github/v66/github/users_ssh_signing_keys.go diff --git a/vendor/github.com/google/go-github/v62/github/with_appengine.go b/vendor/github.com/google/go-github/v66/github/with_appengine.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/with_appengine.go rename to vendor/github.com/google/go-github/v66/github/with_appengine.go diff --git a/vendor/github.com/google/go-github/v62/github/without_appengine.go b/vendor/github.com/google/go-github/v66/github/without_appengine.go similarity index 100% rename from vendor/github.com/google/go-github/v62/github/without_appengine.go rename to 
vendor/github.com/google/go-github/v66/github/without_appengine.go diff --git a/vendor/github.com/redis/go-redis/v9/.golangci.yml b/vendor/github.com/redis/go-redis/v9/.golangci.yml index de514554a..285aca6b3 100644 --- a/vendor/github.com/redis/go-redis/v9/.golangci.yml +++ b/vendor/github.com/redis/go-redis/v9/.golangci.yml @@ -1,4 +1,3 @@ run: - concurrency: 8 - deadline: 5m + timeout: 5m tests: false diff --git a/vendor/github.com/redis/go-redis/v9/Makefile b/vendor/github.com/redis/go-redis/v9/Makefile index d8d007596..1a6bd1786 100644 --- a/vendor/github.com/redis/go-redis/v9/Makefile +++ b/vendor/github.com/redis/go-redis/v9/Makefile @@ -14,6 +14,7 @@ test: testdeps go test ./... -short -race && \ go test ./... -run=NONE -bench=. -benchmem && \ env GOOS=linux GOARCH=386 go test && \ + go test -coverprofile=coverage.txt -covermode=atomic ./... && \ go vet); \ done cd internal/customvet && go build . diff --git a/vendor/github.com/redis/go-redis/v9/README.md b/vendor/github.com/redis/go-redis/v9/README.md index e7df5dfd6..e71367659 100644 --- a/vendor/github.com/redis/go-redis/v9/README.md +++ b/vendor/github.com/redis/go-redis/v9/README.md @@ -3,6 +3,7 @@ [![build workflow](https://github.com/redis/go-redis/actions/workflows/build.yml/badge.svg)](https://github.com/redis/go-redis/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/redis/go-redis/v9)](https://pkg.go.dev/github.com/redis/go-redis/v9?tab=doc) [![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/) +[![codecov](https://codecov.io/github/redis/go-redis/graph/badge.svg?token=tsrCZKuSSw)](https://codecov.io/github/redis/go-redis) [![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) > go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). 
@@ -182,6 +183,24 @@ rdb := redis.NewClient(&redis.Options{ }) ``` +#### Unstable RESP3 Structures for RediSearch Commands +When integrating Redis with application functionalities using RESP3, it's important to note that some response structures aren't final yet. This is especially true for more complex structures like search and query results. We recommend using RESP2 when using the search and query capabilities, but we plan to stabilize the RESP3-based API-s in the coming versions. You can find more guidance in the upcoming release notes. + +To enable unstable RESP3, set the option in your client configuration: + +```go +redis.NewClient(&redis.Options{ + UnstableResp3: true, + }) +``` +**Note:** When UnstableResp3 mode is enabled, it's necessary to use RawResult() and RawVal() to retrieve a raw data. + Since, raw response is the only option for unstable search commands Val() and Result() calls wouldn't have any affect on them: + +```go +res1, err := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawResult() +val1 := client.FTSearchWithArgs(ctx, "txt", "foo bar", &redis.FTSearchOptions{}).RawVal() +``` + ## Contributing Please see [out contributing guidelines](CONTRIBUTING.md) to help us improve this library! 
diff --git a/vendor/github.com/redis/go-redis/v9/command.go b/vendor/github.com/redis/go-redis/v9/command.go index 59ba08969..f3d0e49b7 100644 --- a/vendor/github.com/redis/go-redis/v9/command.go +++ b/vendor/github.com/redis/go-redis/v9/command.go @@ -40,7 +40,7 @@ type Cmder interface { readTimeout() *time.Duration readReply(rd *proto.Reader) error - + readRawReply(rd *proto.Reader) error SetErr(error) Err() error } @@ -122,11 +122,11 @@ func cmdString(cmd Cmder, val interface{}) string { //------------------------------------------------------------------------------ type baseCmd struct { - ctx context.Context - args []interface{} - err error - keyPos int8 - + ctx context.Context + args []interface{} + err error + keyPos int8 + rawVal interface{} _readTimeout *time.Duration } @@ -167,6 +167,8 @@ func (cmd *baseCmd) stringArg(pos int) string { switch v := arg.(type) { case string: return v + case []byte: + return string(v) default: // TODO: consider using appendArg return fmt.Sprint(v) @@ -197,6 +199,11 @@ func (cmd *baseCmd) setReadTimeout(d time.Duration) { cmd._readTimeout = &d } +func (cmd *baseCmd) readRawReply(rd *proto.Reader) (err error) { + cmd.rawVal, err = rd.ReadReply() + return err +} + //------------------------------------------------------------------------------ type Cmd struct { @@ -3787,6 +3794,65 @@ func (cmd *MapStringStringSliceCmd) readReply(rd *proto.Reader) error { return nil } +// ----------------------------------------------------------------------- +// MapStringInterfaceCmd represents a command that returns a map of strings to interface{}. 
+type MapMapStringInterfaceCmd struct { + baseCmd + val map[string]interface{} +} + +func NewMapMapStringInterfaceCmd(ctx context.Context, args ...interface{}) *MapMapStringInterfaceCmd { + return &MapMapStringInterfaceCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *MapMapStringInterfaceCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *MapMapStringInterfaceCmd) SetVal(val map[string]interface{}) { + cmd.val = val +} + +func (cmd *MapMapStringInterfaceCmd) Result() (map[string]interface{}, error) { + return cmd.val, cmd.err +} + +func (cmd *MapMapStringInterfaceCmd) Val() map[string]interface{} { + return cmd.val +} + +func (cmd *MapMapStringInterfaceCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadArrayLen() + if err != nil { + return err + } + + data := make(map[string]interface{}, n/2) + for i := 0; i < n; i += 2 { + _, err := rd.ReadArrayLen() + if err != nil { + cmd.err = err + } + key, err := rd.ReadString() + if err != nil { + cmd.err = err + } + value, err := rd.ReadString() + if err != nil { + cmd.err = err + } + data[key] = value + } + + cmd.val = data + return nil +} + //----------------------------------------------------------------------- type MapStringInterfaceSliceCmd struct { diff --git a/vendor/github.com/redis/go-redis/v9/commands.go b/vendor/github.com/redis/go-redis/v9/commands.go index db5959446..034daa235 100644 --- a/vendor/github.com/redis/go-redis/v9/commands.go +++ b/vendor/github.com/redis/go-redis/v9/commands.go @@ -220,6 +220,7 @@ type Cmdable interface { ProbabilisticCmdable PubSubCmdable ScriptingFunctionsCmdable + SearchCmdable SetCmdable SortedSetCmdable StringCmdable diff --git a/vendor/github.com/redis/go-redis/v9/hash_commands.go b/vendor/github.com/redis/go-redis/v9/hash_commands.go index dcffdcdd9..6596c6f5f 100644 --- a/vendor/github.com/redis/go-redis/v9/hash_commands.go +++ b/vendor/github.com/redis/go-redis/v9/hash_commands.go @@ -225,7 +225,7 @@ func (c 
cmdable) HExpire(ctx context.Context, key string, expiration time.Durati return cmd } -// HExpire - Sets the expiration time for specified fields in a hash in seconds. +// HExpireWithArgs - Sets the expiration time for specified fields in a hash in seconds. // It requires a key, an expiration duration, a struct with boolean flags for conditional expiration settings (NX, XX, GT, LT), and a list of fields. // The command constructs an argument list starting with "HEXPIRE", followed by the key, duration, any conditional flags, and the specified fields. // For more information - https://redis.io/commands/hexpire/ diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go index d315c7937..7f45bc0bb 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn.go @@ -3,10 +3,8 @@ package pool import ( "bufio" "context" - "crypto/tls" "net" "sync/atomic" - "syscall" "time" "github.com/redis/go-redis/v9/internal/proto" @@ -18,9 +16,6 @@ type Conn struct { usedAt int64 // atomic netConn net.Conn - // for checking the health status of the connection, it may be nil. 
- sysConn syscall.Conn - rd *proto.Reader bw *bufio.Writer wr *proto.Writer @@ -39,7 +34,6 @@ func NewConn(netConn net.Conn) *Conn { cn.bw = bufio.NewWriter(netConn) cn.wr = proto.NewWriter(cn.bw) cn.SetUsedAt(time.Now()) - cn.setSysConn() return cn } @@ -56,22 +50,6 @@ func (cn *Conn) SetNetConn(netConn net.Conn) { cn.netConn = netConn cn.rd.Reset(netConn) cn.bw.Reset(netConn) - cn.setSysConn() -} - -func (cn *Conn) setSysConn() { - cn.sysConn = nil - conn := cn.netConn - if conn == nil { - return - } - if tlsConn, ok := conn.(*tls.Conn); ok { - conn = tlsConn.NetConn() - } - - if sysConn, ok := conn.(syscall.Conn); ok { - cn.sysConn = sysConn - } } func (cn *Conn) Write(b []byte) (int, error) { diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go index f28833850..83190d394 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check.go @@ -5,12 +5,21 @@ package pool import ( "errors" "io" + "net" "syscall" + "time" ) var errUnexpectedRead = errors.New("unexpected read from socket") -func connCheck(sysConn syscall.Conn) error { +func connCheck(conn net.Conn) error { + // Reset previous timeout. 
+ _ = conn.SetDeadline(time.Time{}) + + sysConn, ok := conn.(syscall.Conn) + if !ok { + return nil + } rawConn, err := sysConn.SyscallConn() if err != nil { return err diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go index 2d270cf56..295da1268 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/conn_check_dummy.go @@ -2,8 +2,8 @@ package pool -import "syscall" +import "net" -func connCheck(_ syscall.Conn) error { +func connCheck(conn net.Conn) error { return nil } diff --git a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go index 9b84993cc..2125f3e13 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go +++ b/vendor/github.com/redis/go-redis/v9/internal/pool/pool.go @@ -499,8 +499,6 @@ func (p *ConnPool) Close() error { return firstErr } -var zeroTime = time.Time{} - func (p *ConnPool) isHealthyConn(cn *Conn) bool { now := time.Now() @@ -511,12 +509,8 @@ func (p *ConnPool) isHealthyConn(cn *Conn) bool { return false } - if cn.sysConn != nil { - // reset previous timeout. 
- _ = cn.netConn.SetDeadline(zeroTime) - if connCheck(cn.sysConn) != nil { - return false - } + if connCheck(cn.netConn) != nil { + return false } cn.SetUsedAt(now) diff --git a/vendor/github.com/redis/go-redis/v9/internal/util.go b/vendor/github.com/redis/go-redis/v9/internal/util.go index 235a91afa..cc1bff24e 100644 --- a/vendor/github.com/redis/go-redis/v9/internal/util.go +++ b/vendor/github.com/redis/go-redis/v9/internal/util.go @@ -3,6 +3,7 @@ package internal import ( "context" "net" + "strconv" "strings" "time" @@ -81,3 +82,47 @@ func GetAddr(addr string) string { } return net.JoinHostPort(addr[:ind], addr[ind+1:]) } + +func ToInteger(val interface{}) int { + switch v := val.(type) { + case int: + return v + case int64: + return int(v) + case string: + i, _ := strconv.Atoi(v) + return i + default: + return 0 + } +} + +func ToFloat(val interface{}) float64 { + switch v := val.(type) { + case float64: + return v + case string: + f, _ := strconv.ParseFloat(v, 64) + return f + default: + return 0.0 + } +} + +func ToString(val interface{}) string { + if str, ok := val.(string); ok { + return str + } + return "" +} + +func ToStringSlice(val interface{}) []string { + if arr, ok := val.([]interface{}); ok { + result := make([]string, len(arr)) + for i, v := range arr { + result[i] = ToString(v) + } + return result + } + return nil +} diff --git a/vendor/github.com/redis/go-redis/v9/json.go b/vendor/github.com/redis/go-redis/v9/json.go index ca731db3a..b3cadf4b7 100644 --- a/vendor/github.com/redis/go-redis/v9/json.go +++ b/vendor/github.com/redis/go-redis/v9/json.go @@ -60,7 +60,7 @@ type JSONArrTrimArgs struct { type JSONCmd struct { baseCmd val string - expanded []interface{} + expanded interface{} } var _ Cmder = (*JSONCmd)(nil) @@ -100,11 +100,11 @@ func (cmd *JSONCmd) Result() (string, error) { return cmd.Val(), cmd.Err() } -func (cmd JSONCmd) Expanded() (interface{}, error) { +func (cmd *JSONCmd) Expanded() (interface{}, error) { if len(cmd.val) != 0 && 
cmd.expanded == nil { err := json.Unmarshal([]byte(cmd.val), &cmd.expanded) if err != nil { - return "", err + return nil, err } } @@ -494,7 +494,7 @@ func (c cmdable) JSONMSet(ctx context.Context, params ...interface{}) *StatusCmd } // JSONNumIncrBy increments the number value stored at the specified path by the provided number. -// For more information, see https://redis.io/commands/json.numincreby +// For more information, see https://redis.io/docs/latest/commands/json.numincrby/ func (c cmdable) JSONNumIncrBy(ctx context.Context, key, path string, value float64) *JSONCmd { args := []interface{}{"JSON.NUMINCRBY", key, path, value} cmd := newJSONCmd(ctx, args...) diff --git a/vendor/github.com/redis/go-redis/v9/options.go b/vendor/github.com/redis/go-redis/v9/options.go index 6ed693a0b..60ff1ee49 100644 --- a/vendor/github.com/redis/go-redis/v9/options.go +++ b/vendor/github.com/redis/go-redis/v9/options.go @@ -153,6 +153,9 @@ type Options struct { // Add suffix to client name. Default is empty. IdentitySuffix string + + // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. + UnstableResp3 bool } func (opt *Options) init() { diff --git a/vendor/github.com/redis/go-redis/v9/osscluster.go b/vendor/github.com/redis/go-redis/v9/osscluster.go index ce258ff36..9268120bf 100644 --- a/vendor/github.com/redis/go-redis/v9/osscluster.go +++ b/vendor/github.com/redis/go-redis/v9/osscluster.go @@ -90,6 +90,9 @@ type ClusterOptions struct { DisableIndentity bool // Disable set-lib on connect. Default is false. IdentitySuffix string // Add suffix to client name. Default is empty. + + // UnstableResp3 enables Unstable mode for Redis Search module with RESP3. + UnstableResp3 bool } func (opt *ClusterOptions) init() { @@ -304,7 +307,8 @@ func (opt *ClusterOptions) clientOptions() *Options { // much use for ClusterSlots config). 
This means we cannot execute the // READONLY command against that node -- setting readOnly to false in such // situations in the options below will prevent that from happening. - readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + readOnly: opt.ReadOnly && opt.ClusterSlots == nil, + UnstableResp3: opt.UnstableResp3, } } @@ -465,9 +469,11 @@ func (c *clusterNodes) Addrs() ([]string, error) { closed := c.closed //nolint:ifshort if !closed { if len(c.activeAddrs) > 0 { - addrs = c.activeAddrs + addrs = make([]string, len(c.activeAddrs)) + copy(addrs, c.activeAddrs) } else { - addrs = c.addrs + addrs = make([]string, len(c.addrs)) + copy(addrs, c.addrs) } } c.mu.RUnlock() diff --git a/vendor/github.com/redis/go-redis/v9/redis.go b/vendor/github.com/redis/go-redis/v9/redis.go index 527afb677..ec3ff616a 100644 --- a/vendor/github.com/redis/go-redis/v9/redis.go +++ b/vendor/github.com/redis/go-redis/v9/redis.go @@ -41,7 +41,7 @@ type ( ) type hooksMixin struct { - hooksMu *sync.Mutex + hooksMu *sync.RWMutex slice []Hook initial hooks @@ -49,7 +49,7 @@ type hooksMixin struct { } func (hs *hooksMixin) initHooks(hooks hooks) { - hs.hooksMu = new(sync.Mutex) + hs.hooksMu = new(sync.RWMutex) hs.initial = hooks hs.chain() } @@ -151,7 +151,7 @@ func (hs *hooksMixin) clone() hooksMixin { clone := *hs l := len(clone.slice) clone.slice = clone.slice[:l:l] - clone.hooksMu = new(sync.Mutex) + clone.hooksMu = new(sync.RWMutex) return clone } @@ -176,9 +176,14 @@ func (hs *hooksMixin) withProcessPipelineHook( } func (hs *hooksMixin) dialHook(ctx context.Context, network, addr string) (net.Conn, error) { - hs.hooksMu.Lock() - defer hs.hooksMu.Unlock() - return hs.current.dial(ctx, network, addr) + // Access to hs.current is guarded by a read-only lock since it may be mutated by AddHook(...) + // while this dialer is concurrently accessed by the background connection pool population + // routine when MinIdleConns > 0. 
+ hs.hooksMu.RLock() + current := hs.current + hs.hooksMu.RUnlock() + + return current.dial(ctx, network, addr) } func (hs *hooksMixin) processHook(ctx context.Context, cmd Cmder) error { @@ -412,6 +417,19 @@ func (c *baseClient) process(ctx context.Context, cmd Cmder) error { return lastErr } +func (c *baseClient) assertUnstableCommand(cmd Cmder) bool { + switch cmd.(type) { + case *AggregateCmd, *FTInfoCmd, *FTSpellCheckCmd, *FTSearchCmd, *FTSynDumpCmd: + if c.opt.UnstableResp3 { + return true + } else { + panic("RESP3 responses for this command are disabled because they may still change. Please set the flag UnstableResp3 . See the [README](https://github.com/redis/go-redis/blob/master/README.md) and the release notes for guidance.") + } + default: + return false + } +} + func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) { if attempt > 0 { if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil { @@ -427,8 +445,12 @@ func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool atomic.StoreUint32(&retryTimeout, 1) return err } - - if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), cmd.readReply); err != nil { + readReplyFunc := cmd.readReply + // Apply unstable RESP3 search module. 
+ if c.opt.Protocol != 2 && c.assertUnstableCommand(cmd) { + readReplyFunc = cmd.readRawReply + } + if err := cn.WithReader(c.context(ctx), c.cmdTimeout(cmd), readReplyFunc); err != nil { if cmd.readTimeout() == nil { atomic.StoreUint32(&retryTimeout, 1) } else { diff --git a/vendor/github.com/redis/go-redis/v9/ring.go b/vendor/github.com/redis/go-redis/v9/ring.go index 4ae00542b..b40221734 100644 --- a/vendor/github.com/redis/go-redis/v9/ring.go +++ b/vendor/github.com/redis/go-redis/v9/ring.go @@ -100,6 +100,7 @@ type RingOptions struct { DisableIndentity bool IdentitySuffix string + UnstableResp3 bool } func (opt *RingOptions) init() { @@ -168,6 +169,7 @@ func (opt *RingOptions) clientOptions() *Options { DisableIndentity: opt.DisableIndentity, IdentitySuffix: opt.IdentitySuffix, + UnstableResp3: opt.UnstableResp3, } } diff --git a/vendor/github.com/redis/go-redis/v9/search_commands.go b/vendor/github.com/redis/go-redis/v9/search_commands.go new file mode 100644 index 000000000..9359a723e --- /dev/null +++ b/vendor/github.com/redis/go-redis/v9/search_commands.go @@ -0,0 +1,2268 @@ +package redis + +import ( + "context" + "fmt" + "strconv" + + "github.com/redis/go-redis/v9/internal" + "github.com/redis/go-redis/v9/internal/proto" +) + +type SearchCmdable interface { + FT_List(ctx context.Context) *StringSliceCmd + FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd + FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd + FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd + FTAliasDel(ctx context.Context, alias string) *StatusCmd + FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd + FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd + FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd + FTConfigSet(ctx context.Context, option string, value interface{}) 
*StatusCmd + FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd + FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd + FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd + FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd + FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd + FTDictDump(ctx context.Context, dict string) *StringSliceCmd + FTDropIndex(ctx context.Context, index string) *StatusCmd + FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd + FTExplain(ctx context.Context, index string, query string) *StringCmd + FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd + FTInfo(ctx context.Context, index string) *FTInfoCmd + FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd + FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd + FTSearch(ctx context.Context, index string, query string) *FTSearchCmd + FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd + FTSynDump(ctx context.Context, index string) *FTSynDumpCmd + FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd + FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd + FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd +} + +type FTCreateOptions struct { + OnHash bool + OnJSON bool + Prefix []interface{} + Filter string + DefaultLanguage string + LanguageField string + Score float64 + ScoreField string + PayloadField string + MaxTextFields int + NoOffsets bool + Temporary int + NoHL bool + NoFields bool + NoFreqs bool + StopWords []interface{} + 
SkipInitialScan bool +} + +type FieldSchema struct { + FieldName string + As string + FieldType SearchFieldType + Sortable bool + UNF bool + NoStem bool + NoIndex bool + PhoneticMatcher string + Weight float64 + Separator string + CaseSensitive bool + WithSuffixtrie bool + VectorArgs *FTVectorArgs + GeoShapeFieldType string + IndexEmpty bool + IndexMissing bool +} + +type FTVectorArgs struct { + FlatOptions *FTFlatOptions + HNSWOptions *FTHNSWOptions +} + +type FTFlatOptions struct { + Type string + Dim int + DistanceMetric string + InitialCapacity int + BlockSize int +} + +type FTHNSWOptions struct { + Type string + Dim int + DistanceMetric string + InitialCapacity int + MaxEdgesPerNode int + MaxAllowedEdgesPerNode int + EFRunTime int + Epsilon float64 +} + +type FTDropIndexOptions struct { + DeleteDocs bool +} + +type SpellCheckTerms struct { + Include bool + Exclude bool + Dictionary string +} + +type FTExplainOptions struct { + Dialect string +} + +type FTSynUpdateOptions struct { + SkipInitialScan bool +} + +type SearchAggregator int + +const ( + SearchInvalid = SearchAggregator(iota) + SearchAvg + SearchSum + SearchMin + SearchMax + SearchCount + SearchCountDistinct + SearchCountDistinctish + SearchStdDev + SearchQuantile + SearchToList + SearchFirstValue + SearchRandomSample +) + +func (a SearchAggregator) String() string { + switch a { + case SearchInvalid: + return "" + case SearchAvg: + return "AVG" + case SearchSum: + return "SUM" + case SearchMin: + return "MIN" + case SearchMax: + return "MAX" + case SearchCount: + return "COUNT" + case SearchCountDistinct: + return "COUNT_DISTINCT" + case SearchCountDistinctish: + return "COUNT_DISTINCTISH" + case SearchStdDev: + return "STDDEV" + case SearchQuantile: + return "QUANTILE" + case SearchToList: + return "TOLIST" + case SearchFirstValue: + return "FIRST_VALUE" + case SearchRandomSample: + return "RANDOM_SAMPLE" + default: + return "" + } +} + +type SearchFieldType int + +const ( + SearchFieldTypeInvalid = 
SearchFieldType(iota) + SearchFieldTypeNumeric + SearchFieldTypeTag + SearchFieldTypeText + SearchFieldTypeGeo + SearchFieldTypeVector + SearchFieldTypeGeoShape +) + +func (t SearchFieldType) String() string { + switch t { + case SearchFieldTypeInvalid: + return "" + case SearchFieldTypeNumeric: + return "NUMERIC" + case SearchFieldTypeTag: + return "TAG" + case SearchFieldTypeText: + return "TEXT" + case SearchFieldTypeGeo: + return "GEO" + case SearchFieldTypeVector: + return "VECTOR" + case SearchFieldTypeGeoShape: + return "GEOSHAPE" + default: + return "TEXT" + } +} + +// Each AggregateReducer have different args. +// Please follow https://redis.io/docs/interact/search-and-query/search/aggregations/#supported-groupby-reducers for more information. +type FTAggregateReducer struct { + Reducer SearchAggregator + Args []interface{} + As string +} + +type FTAggregateGroupBy struct { + Fields []interface{} + Reduce []FTAggregateReducer +} + +type FTAggregateSortBy struct { + FieldName string + Asc bool + Desc bool +} + +type FTAggregateApply struct { + Field string + As string +} + +type FTAggregateLoad struct { + Field string + As string +} + +type FTAggregateWithCursor struct { + Count int + MaxIdle int +} + +type FTAggregateOptions struct { + Verbatim bool + LoadAll bool + Load []FTAggregateLoad + Timeout int + GroupBy []FTAggregateGroupBy + SortBy []FTAggregateSortBy + SortByMax int + Scorer string + AddScores bool + Apply []FTAggregateApply + LimitOffset int + Limit int + Filter string + WithCursor bool + WithCursorOptions *FTAggregateWithCursor + Params map[string]interface{} + DialectVersion int +} + +type FTSearchFilter struct { + FieldName interface{} + Min interface{} + Max interface{} +} + +type FTSearchGeoFilter struct { + FieldName string + Longitude float64 + Latitude float64 + Radius float64 + Unit string +} + +type FTSearchReturn struct { + FieldName string + As string +} + +type FTSearchSortBy struct { + FieldName string + Asc bool + Desc bool +} + 
+type FTSearchOptions struct { + NoContent bool + Verbatim bool + NoStopWords bool + WithScores bool + WithPayloads bool + WithSortKeys bool + Filters []FTSearchFilter + GeoFilter []FTSearchGeoFilter + InKeys []interface{} + InFields []interface{} + Return []FTSearchReturn + Slop int + Timeout int + InOrder bool + Language string + Expander string + Scorer string + ExplainScore bool + Payload string + SortBy []FTSearchSortBy + SortByWithCount bool + LimitOffset int + Limit int + Params map[string]interface{} + DialectVersion int +} + +type FTSynDumpResult struct { + Term string + Synonyms []string +} + +type FTSynDumpCmd struct { + baseCmd + val []FTSynDumpResult +} + +type FTAggregateResult struct { + Total int + Rows []AggregateRow +} + +type AggregateRow struct { + Fields map[string]interface{} +} + +type AggregateCmd struct { + baseCmd + val *FTAggregateResult +} + +type FTInfoResult struct { + IndexErrors IndexErrors + Attributes []FTAttribute + BytesPerRecordAvg string + Cleaning int + CursorStats CursorStats + DialectStats map[string]int + DocTableSizeMB float64 + FieldStatistics []FieldStatistic + GCStats GCStats + GeoshapesSzMB float64 + HashIndexingFailures int + IndexDefinition IndexDefinition + IndexName string + IndexOptions []string + Indexing int + InvertedSzMB float64 + KeyTableSizeMB float64 + MaxDocID int + NumDocs int + NumRecords int + NumTerms int + NumberOfUses int + OffsetBitsPerRecordAvg string + OffsetVectorsSzMB float64 + OffsetsPerTermAvg string + PercentIndexed float64 + RecordsPerDocAvg string + SortableValuesSizeMB float64 + TagOverheadSzMB float64 + TextOverheadSzMB float64 + TotalIndexMemorySzMB float64 + TotalIndexingTime int + TotalInvertedIndexBlocks int + VectorIndexSzMB float64 +} + +type IndexErrors struct { + IndexingFailures int + LastIndexingError string + LastIndexingErrorKey string +} + +type FTAttribute struct { + Identifier string + Attribute string + Type string + Weight float64 + Sortable bool + NoStem bool + NoIndex 
bool + UNF bool + PhoneticMatcher string + CaseSensitive bool + WithSuffixtrie bool +} + +type CursorStats struct { + GlobalIdle int + GlobalTotal int + IndexCapacity int + IndexTotal int +} + +type FieldStatistic struct { + Identifier string + Attribute string + IndexErrors IndexErrors +} + +type GCStats struct { + BytesCollected int + TotalMsRun int + TotalCycles int + AverageCycleTimeMs string + LastRunTimeMs int + GCNumericTreesMissed int + GCBlocksDenied int +} + +type IndexDefinition struct { + KeyType string + Prefixes []string + DefaultScore float64 +} + +type FTSpellCheckOptions struct { + Distance int + Terms *FTSpellCheckTerms + Dialect int +} + +type FTSpellCheckTerms struct { + Inclusion string // Either "INCLUDE" or "EXCLUDE" + Dictionary string + Terms []interface{} +} + +type SpellCheckResult struct { + Term string + Suggestions []SpellCheckSuggestion +} + +type SpellCheckSuggestion struct { + Score float64 + Suggestion string +} + +type FTSearchResult struct { + Total int + Docs []Document +} + +type Document struct { + ID string + Score *float64 + Payload *string + SortKey *string + Fields map[string]string +} + +type AggregateQuery []interface{} + +// FT_List - Lists all the existing indexes in the database. +// For more information, please refer to the Redis documentation: +// [FT._LIST]: (https://redis.io/commands/ft._list/) +func (c cmdable) FT_List(ctx context.Context) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT._LIST") + _ = c(ctx, cmd) + return cmd +} + +// FTAggregate - Performs a search query on an index and applies a series of aggregate transformations to the result. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. 
+// For more information, please refer to the Redis documentation: +// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) +func (c cmdable) FTAggregate(ctx context.Context, index string, query string) *MapStringInterfaceCmd { + args := []interface{}{"FT.AGGREGATE", index, query} + cmd := NewMapStringInterfaceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +func FTAggregateQuery(query string, options *FTAggregateOptions) AggregateQuery { + queryArgs := []interface{}{query} + if options != nil { + if options.Verbatim { + queryArgs = append(queryArgs, "VERBATIM") + } + + if options.Scorer != "" { + queryArgs = append(queryArgs, "SCORER", options.Scorer) + } + + if options.AddScores { + queryArgs = append(queryArgs, "ADDSCORES") + } + + if options.LoadAll && options.Load != nil { + panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive") + } + if options.LoadAll { + queryArgs = append(queryArgs, "LOAD", "*") + } + if options.Load != nil { + queryArgs = append(queryArgs, "LOAD", len(options.Load)) + index, count := len(queryArgs)-1, 0 + for _, load := range options.Load { + queryArgs = append(queryArgs, load.Field) + count++ + if load.As != "" { + queryArgs = append(queryArgs, "AS", load.As) + count += 2 + } + } + queryArgs[index] = count + } + + if options.Timeout > 0 { + queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) + } + + for _, apply := range options.Apply { + queryArgs = append(queryArgs, "APPLY", apply.Field) + if apply.As != "" { + queryArgs = append(queryArgs, "AS", apply.As) + } + } + + if options.GroupBy != nil { + for _, groupBy := range options.GroupBy { + queryArgs = append(queryArgs, "GROUPBY", len(groupBy.Fields)) + queryArgs = append(queryArgs, groupBy.Fields...) 
+ + for _, reducer := range groupBy.Reduce { + queryArgs = append(queryArgs, "REDUCE") + queryArgs = append(queryArgs, reducer.Reducer.String()) + if reducer.Args != nil { + queryArgs = append(queryArgs, len(reducer.Args)) + queryArgs = append(queryArgs, reducer.Args...) + } else { + queryArgs = append(queryArgs, 0) + } + if reducer.As != "" { + queryArgs = append(queryArgs, "AS", reducer.As) + } + } + } + } + if options.SortBy != nil { + queryArgs = append(queryArgs, "SORTBY") + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + panic("FT.AGGREGATE: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + queryArgs = append(queryArgs, len(sortByOptions)) + queryArgs = append(queryArgs, sortByOptions...) + } + if options.SortByMax > 0 { + queryArgs = append(queryArgs, "MAX", options.SortByMax) + } + if options.LimitOffset >= 0 && options.Limit > 0 { + queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Filter != "" { + queryArgs = append(queryArgs, "FILTER", options.Filter) + } + if options.WithCursor { + queryArgs = append(queryArgs, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + queryArgs = append(queryArgs, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + queryArgs = append(queryArgs, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + if options.Params != nil { + queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + queryArgs = append(queryArgs, key, value) + } + } + + if options.DialectVersion > 0 { + queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } + } + return queryArgs +} + +func 
ProcessAggregateResult(data []interface{}) (*FTAggregateResult, error) { + if len(data) == 0 { + return nil, fmt.Errorf("no data returned") + } + + total, ok := data[0].(int64) + if !ok { + return nil, fmt.Errorf("invalid total format") + } + + rows := make([]AggregateRow, 0, len(data)-1) + for _, row := range data[1:] { + fields, ok := row.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid row format") + } + + rowMap := make(map[string]interface{}) + for i := 0; i < len(fields); i += 2 { + key, ok := fields[i].(string) + if !ok { + return nil, fmt.Errorf("invalid field key format") + } + value := fields[i+1] + rowMap[key] = value + } + rows = append(rows, AggregateRow{Fields: rowMap}) + } + + result := &FTAggregateResult{ + Total: int(total), + Rows: rows, + } + return result, nil +} + +func NewAggregateCmd(ctx context.Context, args ...interface{}) *AggregateCmd { + return &AggregateCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *AggregateCmd) SetVal(val *FTAggregateResult) { + cmd.val = val +} + +func (cmd *AggregateCmd) Val() *FTAggregateResult { + return cmd.val +} + +func (cmd *AggregateCmd) Result() (*FTAggregateResult, error) { + return cmd.val, cmd.err +} + +func (cmd *AggregateCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *AggregateCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *AggregateCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *AggregateCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = ProcessAggregateResult(data) + if err != nil { + return err + } + return nil +} + +// FTAggregateWithArgs - Performs a search query on an index and applies a series of aggregate transformations to the result. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. 
+// This function also allows for specifying additional options such as: Verbatim, LoadAll, Load, Timeout, GroupBy, SortBy, SortByMax, Apply, LimitOffset, Limit, Filter, WithCursor, Params, and DialectVersion. +// For more information, please refer to the Redis documentation: +// [FT.AGGREGATE]: (https://redis.io/commands/ft.aggregate/) +func (c cmdable) FTAggregateWithArgs(ctx context.Context, index string, query string, options *FTAggregateOptions) *AggregateCmd { + args := []interface{}{"FT.AGGREGATE", index, query} + if options != nil { + if options.Verbatim { + args = append(args, "VERBATIM") + } + if options.Scorer != "" { + args = append(args, "SCORER", options.Scorer) + } + if options.AddScores { + args = append(args, "ADDSCORES") + } + if options.LoadAll && options.Load != nil { + panic("FT.AGGREGATE: LOADALL and LOAD are mutually exclusive") + } + if options.LoadAll { + args = append(args, "LOAD", "*") + } + if options.Load != nil { + args = append(args, "LOAD", len(options.Load)) + index, count := len(args)-1, 0 + for _, load := range options.Load { + args = append(args, load.Field) + count++ + if load.As != "" { + args = append(args, "AS", load.As) + count += 2 + } + } + args[index] = count + } + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + for _, apply := range options.Apply { + args = append(args, "APPLY", apply.Field) + if apply.As != "" { + args = append(args, "AS", apply.As) + } + } + if options.GroupBy != nil { + for _, groupBy := range options.GroupBy { + args = append(args, "GROUPBY", len(groupBy.Fields)) + args = append(args, groupBy.Fields...) + + for _, reducer := range groupBy.Reduce { + args = append(args, "REDUCE") + args = append(args, reducer.Reducer.String()) + if reducer.Args != nil { + args = append(args, len(reducer.Args)) + args = append(args, reducer.Args...) 
+ } else { + args = append(args, 0) + } + if reducer.As != "" { + args = append(args, "AS", reducer.As) + } + } + } + } + if options.SortBy != nil { + args = append(args, "SORTBY") + sortByOptions := []interface{}{} + for _, sortBy := range options.SortBy { + sortByOptions = append(sortByOptions, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + panic("FT.AGGREGATE: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + sortByOptions = append(sortByOptions, "ASC") + } + if sortBy.Desc { + sortByOptions = append(sortByOptions, "DESC") + } + } + args = append(args, len(sortByOptions)) + args = append(args, sortByOptions...) + } + if options.SortByMax > 0 { + args = append(args, "MAX", options.SortByMax) + } + if options.LimitOffset >= 0 && options.Limit > 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + if options.WithCursor { + args = append(args, "WITHCURSOR") + if options.WithCursorOptions != nil { + if options.WithCursorOptions.Count > 0 { + args = append(args, "COUNT", options.WithCursorOptions.Count) + } + if options.WithCursorOptions.MaxIdle > 0 { + args = append(args, "MAXIDLE", options.WithCursorOptions.MaxIdle) + } + } + } + if options.Params != nil { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + args = append(args, key, value) + } + } + if options.DialectVersion > 0 { + args = append(args, "DIALECT", options.DialectVersion) + } + } + + cmd := NewAggregateCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasAdd - Adds an alias to an index. +// The 'index' parameter specifies the index to which the alias is added, and the 'alias' parameter specifies the alias. 
+// For more information, please refer to the Redis documentation: +// [FT.ALIASADD]: (https://redis.io/commands/ft.aliasadd/) +func (c cmdable) FTAliasAdd(ctx context.Context, index string, alias string) *StatusCmd { + args := []interface{}{"FT.ALIASADD", alias, index} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasDel - Removes an alias from an index. +// The 'alias' parameter specifies the alias to be removed. +// For more information, please refer to the Redis documentation: +// [FT.ALIASDEL]: (https://redis.io/commands/ft.aliasdel/) +func (c cmdable) FTAliasDel(ctx context.Context, alias string) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.ALIASDEL", alias) + _ = c(ctx, cmd) + return cmd +} + +// FTAliasUpdate - Updates an alias to an index. +// The 'index' parameter specifies the index to which the alias is updated, and the 'alias' parameter specifies the alias. +// If the alias already exists for a different index, it updates the alias to point to the specified index instead. +// For more information, please refer to the Redis documentation: +// [FT.ALIASUPDATE]: (https://redis.io/commands/ft.aliasupdate/) +func (c cmdable) FTAliasUpdate(ctx context.Context, index string, alias string) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.ALIASUPDATE", alias, index) + _ = c(ctx, cmd) + return cmd +} + +// FTAlter - Alters the definition of an existing index. +// The 'index' parameter specifies the index to alter, and the 'skipInitialScan' parameter specifies whether to skip the initial scan. +// The 'definition' parameter specifies the new definition for the index. 
+// For more information, please refer to the Redis documentation: +// [FT.ALTER]: (https://redis.io/commands/ft.alter/) +func (c cmdable) FTAlter(ctx context.Context, index string, skipInitialScan bool, definition []interface{}) *StatusCmd { + args := []interface{}{"FT.ALTER", index} + if skipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + args = append(args, "SCHEMA", "ADD") + args = append(args, definition...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTConfigGet - Retrieves the value of a RediSearch configuration parameter. +// The 'option' parameter specifies the configuration parameter to retrieve. +// For more information, please refer to the Redis documentation: +// [FT.CONFIG GET]: (https://redis.io/commands/ft.config-get/) +func (c cmdable) FTConfigGet(ctx context.Context, option string) *MapMapStringInterfaceCmd { + cmd := NewMapMapStringInterfaceCmd(ctx, "FT.CONFIG", "GET", option) + _ = c(ctx, cmd) + return cmd +} + +// FTConfigSet - Sets the value of a RediSearch configuration parameter. +// The 'option' parameter specifies the configuration parameter to set, and the 'value' parameter specifies the new value. +// For more information, please refer to the Redis documentation: +// [FT.CONFIG SET]: (https://redis.io/commands/ft.config-set/) +func (c cmdable) FTConfigSet(ctx context.Context, option string, value interface{}) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.CONFIG", "SET", option, value) + _ = c(ctx, cmd) + return cmd +} + +// FTCreate - Creates a new index with the given options and schema. +// The 'index' parameter specifies the name of the index to create. +// The 'options' parameter specifies various options for the index, such as: +// whether to index hashes or JSONs, prefixes, filters, default language, score, score field, payload field, etc. +// The 'schema' parameter specifies the schema for the index, which includes the field name, field type, etc. 
+// For more information, please refer to the Redis documentation: +// [FT.CREATE]: (https://redis.io/commands/ft.create/) +func (c cmdable) FTCreate(ctx context.Context, index string, options *FTCreateOptions, schema ...*FieldSchema) *StatusCmd { + args := []interface{}{"FT.CREATE", index} + if options != nil { + if options.OnHash && !options.OnJSON { + args = append(args, "ON", "HASH") + } + if options.OnJSON && !options.OnHash { + args = append(args, "ON", "JSON") + } + if options.OnHash && options.OnJSON { + panic("FT.CREATE: ON HASH and ON JSON are mutually exclusive") + } + if options.Prefix != nil { + args = append(args, "PREFIX", len(options.Prefix)) + args = append(args, options.Prefix...) + } + if options.Filter != "" { + args = append(args, "FILTER", options.Filter) + } + if options.DefaultLanguage != "" { + args = append(args, "LANGUAGE", options.DefaultLanguage) + } + if options.LanguageField != "" { + args = append(args, "LANGUAGE_FIELD", options.LanguageField) + } + if options.Score > 0 { + args = append(args, "SCORE", options.Score) + } + if options.ScoreField != "" { + args = append(args, "SCORE_FIELD", options.ScoreField) + } + if options.PayloadField != "" { + args = append(args, "PAYLOAD_FIELD", options.PayloadField) + } + if options.MaxTextFields > 0 { + args = append(args, "MAXTEXTFIELDS", options.MaxTextFields) + } + if options.NoOffsets { + args = append(args, "NOOFFSETS") + } + if options.Temporary > 0 { + args = append(args, "TEMPORARY", options.Temporary) + } + if options.NoHL { + args = append(args, "NOHL") + } + if options.NoFields { + args = append(args, "NOFIELDS") + } + if options.NoFreqs { + args = append(args, "NOFREQS") + } + if options.StopWords != nil { + args = append(args, "STOPWORDS", len(options.StopWords)) + args = append(args, options.StopWords...) 
+ } + if options.SkipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + } + if schema == nil { + panic("FT.CREATE: SCHEMA is required") + } + args = append(args, "SCHEMA") + for _, schema := range schema { + if schema.FieldName == "" || schema.FieldType == SearchFieldTypeInvalid { + panic("FT.CREATE: SCHEMA FieldName and FieldType are required") + } + args = append(args, schema.FieldName) + if schema.As != "" { + args = append(args, "AS", schema.As) + } + args = append(args, schema.FieldType.String()) + if schema.VectorArgs != nil { + if schema.FieldType != SearchFieldTypeVector { + panic("FT.CREATE: SCHEMA FieldType VECTOR is required for VectorArgs") + } + if schema.VectorArgs.FlatOptions != nil && schema.VectorArgs.HNSWOptions != nil { + panic("FT.CREATE: SCHEMA VectorArgs FlatOptions and HNSWOptions are mutually exclusive") + } + if schema.VectorArgs.FlatOptions != nil { + args = append(args, "FLAT") + if schema.VectorArgs.FlatOptions.Type == "" || schema.VectorArgs.FlatOptions.Dim == 0 || schema.VectorArgs.FlatOptions.DistanceMetric == "" { + panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR FLAT") + } + flatArgs := []interface{}{ + "TYPE", schema.VectorArgs.FlatOptions.Type, + "DIM", schema.VectorArgs.FlatOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.FlatOptions.DistanceMetric, + } + if schema.VectorArgs.FlatOptions.InitialCapacity > 0 { + flatArgs = append(flatArgs, "INITIAL_CAP", schema.VectorArgs.FlatOptions.InitialCapacity) + } + if schema.VectorArgs.FlatOptions.BlockSize > 0 { + flatArgs = append(flatArgs, "BLOCK_SIZE", schema.VectorArgs.FlatOptions.BlockSize) + } + args = append(args, len(flatArgs)) + args = append(args, flatArgs...) 
+ } + if schema.VectorArgs.HNSWOptions != nil { + args = append(args, "HNSW") + if schema.VectorArgs.HNSWOptions.Type == "" || schema.VectorArgs.HNSWOptions.Dim == 0 || schema.VectorArgs.HNSWOptions.DistanceMetric == "" { + panic("FT.CREATE: Type, Dim and DistanceMetric are required for VECTOR HNSW") + } + hnswArgs := []interface{}{ + "TYPE", schema.VectorArgs.HNSWOptions.Type, + "DIM", schema.VectorArgs.HNSWOptions.Dim, + "DISTANCE_METRIC", schema.VectorArgs.HNSWOptions.DistanceMetric, + } + if schema.VectorArgs.HNSWOptions.InitialCapacity > 0 { + hnswArgs = append(hnswArgs, "INITIAL_CAP", schema.VectorArgs.HNSWOptions.InitialCapacity) + } + if schema.VectorArgs.HNSWOptions.MaxEdgesPerNode > 0 { + hnswArgs = append(hnswArgs, "M", schema.VectorArgs.HNSWOptions.MaxEdgesPerNode) + } + if schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode > 0 { + hnswArgs = append(hnswArgs, "EF_CONSTRUCTION", schema.VectorArgs.HNSWOptions.MaxAllowedEdgesPerNode) + } + if schema.VectorArgs.HNSWOptions.EFRunTime > 0 { + hnswArgs = append(hnswArgs, "EF_RUNTIME", schema.VectorArgs.HNSWOptions.EFRunTime) + } + if schema.VectorArgs.HNSWOptions.Epsilon > 0 { + hnswArgs = append(hnswArgs, "EPSILON", schema.VectorArgs.HNSWOptions.Epsilon) + } + args = append(args, len(hnswArgs)) + args = append(args, hnswArgs...) 
+ } + } + if schema.GeoShapeFieldType != "" { + if schema.FieldType != SearchFieldTypeGeoShape { + panic("FT.CREATE: SCHEMA FieldType GEOSHAPE is required for GeoShapeFieldType") + } + args = append(args, schema.GeoShapeFieldType) + } + if schema.NoStem { + args = append(args, "NOSTEM") + } + if schema.Sortable { + args = append(args, "SORTABLE") + } + if schema.UNF { + args = append(args, "UNF") + } + if schema.NoIndex { + args = append(args, "NOINDEX") + } + if schema.PhoneticMatcher != "" { + args = append(args, "PHONETIC", schema.PhoneticMatcher) + } + if schema.Weight > 0 { + args = append(args, "WEIGHT", schema.Weight) + } + if schema.Separator != "" { + args = append(args, "SEPARATOR", schema.Separator) + } + if schema.CaseSensitive { + args = append(args, "CASESENSITIVE") + } + if schema.WithSuffixtrie { + args = append(args, "WITHSUFFIXTRIE") + } + if schema.IndexEmpty { + args = append(args, "INDEXEMPTY") + } + if schema.IndexMissing { + args = append(args, "INDEXMISSING") + + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTCursorDel - Deletes a cursor from an existing index. +// The 'index' parameter specifies the index from which to delete the cursor, and the 'cursorId' parameter specifies the ID of the cursor to delete. +// For more information, please refer to the Redis documentation: +// [FT.CURSOR DEL]: (https://redis.io/commands/ft.cursor-del/) +func (c cmdable) FTCursorDel(ctx context.Context, index string, cursorId int) *StatusCmd { + cmd := NewStatusCmd(ctx, "FT.CURSOR", "DEL", index, cursorId) + _ = c(ctx, cmd) + return cmd +} + +// FTCursorRead - Reads the next results from an existing cursor. +// The 'index' parameter specifies the index from which to read the cursor, the 'cursorId' parameter specifies the ID of the cursor to read, and the 'count' parameter specifies the number of results to read. 
+// For more information, please refer to the Redis documentation: +// [FT.CURSOR READ]: (https://redis.io/commands/ft.cursor-read/) +func (c cmdable) FTCursorRead(ctx context.Context, index string, cursorId int, count int) *MapStringInterfaceCmd { + args := []interface{}{"FT.CURSOR", "READ", index, cursorId} + if count > 0 { + args = append(args, "COUNT", count) + } + cmd := NewMapStringInterfaceCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictAdd - Adds terms to a dictionary. +// The 'dict' parameter specifies the dictionary to which to add the terms, and the 'term' parameter specifies the terms to add. +// For more information, please refer to the Redis documentation: +// [FT.DICTADD]: (https://redis.io/commands/ft.dictadd/) +func (c cmdable) FTDictAdd(ctx context.Context, dict string, term ...interface{}) *IntCmd { + args := []interface{}{"FT.DICTADD", dict} + args = append(args, term...) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictDel - Deletes terms from a dictionary. +// The 'dict' parameter specifies the dictionary from which to delete the terms, and the 'term' parameter specifies the terms to delete. +// For more information, please refer to the Redis documentation: +// [FT.DICTDEL]: (https://redis.io/commands/ft.dictdel/) +func (c cmdable) FTDictDel(ctx context.Context, dict string, term ...interface{}) *IntCmd { + args := []interface{}{"FT.DICTDEL", dict} + args = append(args, term...) + cmd := NewIntCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDictDump - Returns all terms in the specified dictionary. +// The 'dict' parameter specifies the dictionary from which to return the terms. 
+// For more information, please refer to the Redis documentation: +// [FT.DICTDUMP]: (https://redis.io/commands/ft.dictdump/) +func (c cmdable) FTDictDump(ctx context.Context, dict string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT.DICTDUMP", dict) + _ = c(ctx, cmd) + return cmd +} + +// FTDropIndex - Deletes an index. +// The 'index' parameter specifies the index to delete. +// For more information, please refer to the Redis documentation: +// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) +func (c cmdable) FTDropIndex(ctx context.Context, index string) *StatusCmd { + args := []interface{}{"FT.DROPINDEX", index} + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTDropIndexWithArgs - Deletes an index with options. +// The 'index' parameter specifies the index to delete, and the 'options' parameter specifies the DeleteDocs option for docs deletion. +// For more information, please refer to the Redis documentation: +// [FT.DROPINDEX]: (https://redis.io/commands/ft.dropindex/) +func (c cmdable) FTDropIndexWithArgs(ctx context.Context, index string, options *FTDropIndexOptions) *StatusCmd { + args := []interface{}{"FT.DROPINDEX", index} + if options != nil { + if options.DeleteDocs { + args = append(args, "DD") + } + } + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTExplain - Returns the execution plan for a complex query. +// The 'index' parameter specifies the index to query, and the 'query' parameter specifies the query string. +// For more information, please refer to the Redis documentation: +// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) +func (c cmdable) FTExplain(ctx context.Context, index string, query string) *StringCmd { + cmd := NewStringCmd(ctx, "FT.EXPLAIN", index, query) + _ = c(ctx, cmd) + return cmd +} + +// FTExplainWithArgs - Returns the execution plan for a complex query with options. 
+// The 'index' parameter specifies the index to query, the 'query' parameter specifies the query string, and the 'options' parameter specifies the Dialect for the query. +// For more information, please refer to the Redis documentation: +// [FT.EXPLAIN]: (https://redis.io/commands/ft.explain/) +func (c cmdable) FTExplainWithArgs(ctx context.Context, index string, query string, options *FTExplainOptions) *StringCmd { + args := []interface{}{"FT.EXPLAIN", index, query} + if options.Dialect != "" { + args = append(args, "DIALECT", options.Dialect) + } + cmd := NewStringCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTExplainCli - Returns the execution plan for a complex query. [Not Implemented] +// For more information, see https://redis.io/commands/ft.explaincli/ +func (c cmdable) FTExplainCli(ctx context.Context, key, path string) error { + panic("not implemented") +} + +func parseFTInfo(data map[string]interface{}) (FTInfoResult, error) { + var ftInfo FTInfoResult + // Manually parse each field from the map + if indexErrors, ok := data["Index Errors"].([]interface{}); ok { + ftInfo.IndexErrors = IndexErrors{ + IndexingFailures: internal.ToInteger(indexErrors[1]), + LastIndexingError: internal.ToString(indexErrors[3]), + LastIndexingErrorKey: internal.ToString(indexErrors[5]), + } + } + + if attributes, ok := data["attributes"].([]interface{}); ok { + for _, attr := range attributes { + if attrMap, ok := attr.([]interface{}); ok { + att := FTAttribute{} + for i := 0; i < len(attrMap); i++ { + if internal.ToLower(internal.ToString(attrMap[i])) == "attribute" { + att.Attribute = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "identifier" { + att.Identifier = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "type" { + att.Type = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "weight" { + 
att.Weight = internal.ToFloat(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "nostem" { + att.NoStem = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "sortable" { + att.Sortable = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "noindex" { + att.NoIndex = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "unf" { + att.UNF = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "phonetic" { + att.PhoneticMatcher = internal.ToString(attrMap[i+1]) + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "case_sensitive" { + att.CaseSensitive = true + continue + } + if internal.ToLower(internal.ToString(attrMap[i])) == "withsuffixtrie" { + att.WithSuffixtrie = true + continue + } + + } + ftInfo.Attributes = append(ftInfo.Attributes, att) + } + } + } + + ftInfo.BytesPerRecordAvg = internal.ToString(data["bytes_per_record_avg"]) + ftInfo.Cleaning = internal.ToInteger(data["cleaning"]) + + if cursorStats, ok := data["cursor_stats"].([]interface{}); ok { + ftInfo.CursorStats = CursorStats{ + GlobalIdle: internal.ToInteger(cursorStats[1]), + GlobalTotal: internal.ToInteger(cursorStats[3]), + IndexCapacity: internal.ToInteger(cursorStats[5]), + IndexTotal: internal.ToInteger(cursorStats[7]), + } + } + + if dialectStats, ok := data["dialect_stats"].([]interface{}); ok { + ftInfo.DialectStats = make(map[string]int) + for i := 0; i < len(dialectStats); i += 2 { + ftInfo.DialectStats[internal.ToString(dialectStats[i])] = internal.ToInteger(dialectStats[i+1]) + } + } + + ftInfo.DocTableSizeMB = internal.ToFloat(data["doc_table_size_mb"]) + + if fieldStats, ok := data["field statistics"].([]interface{}); ok { + for _, stat := range fieldStats { + if statMap, ok := stat.([]interface{}); ok { + ftInfo.FieldStatistics = append(ftInfo.FieldStatistics, FieldStatistic{ + Identifier: internal.ToString(statMap[1]), + Attribute: 
internal.ToString(statMap[3]), + IndexErrors: IndexErrors{ + IndexingFailures: internal.ToInteger(statMap[5].([]interface{})[1]), + LastIndexingError: internal.ToString(statMap[5].([]interface{})[3]), + LastIndexingErrorKey: internal.ToString(statMap[5].([]interface{})[5]), + }, + }) + } + } + } + + if gcStats, ok := data["gc_stats"].([]interface{}); ok { + ftInfo.GCStats = GCStats{} + for i := 0; i < len(gcStats); i += 2 { + if internal.ToLower(internal.ToString(gcStats[i])) == "bytes_collected" { + ftInfo.GCStats.BytesCollected = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "total_ms_run" { + ftInfo.GCStats.TotalMsRun = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "total_cycles" { + ftInfo.GCStats.TotalCycles = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "average_cycle_time_ms" { + ftInfo.GCStats.AverageCycleTimeMs = internal.ToString(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "last_run_time_ms" { + ftInfo.GCStats.LastRunTimeMs = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "gc_numeric_trees_missed" { + ftInfo.GCStats.GCNumericTreesMissed = internal.ToInteger(gcStats[i+1]) + continue + } + if internal.ToLower(internal.ToString(gcStats[i])) == "gc_blocks_denied" { + ftInfo.GCStats.GCBlocksDenied = internal.ToInteger(gcStats[i+1]) + continue + } + } + } + + ftInfo.GeoshapesSzMB = internal.ToFloat(data["geoshapes_sz_mb"]) + ftInfo.HashIndexingFailures = internal.ToInteger(data["hash_indexing_failures"]) + + if indexDef, ok := data["index_definition"].([]interface{}); ok { + ftInfo.IndexDefinition = IndexDefinition{ + KeyType: internal.ToString(indexDef[1]), + Prefixes: internal.ToStringSlice(indexDef[3]), + DefaultScore: internal.ToFloat(indexDef[5]), + } + } + + ftInfo.IndexName = 
internal.ToString(data["index_name"]) + ftInfo.IndexOptions = internal.ToStringSlice(data["index_options"].([]interface{})) + ftInfo.Indexing = internal.ToInteger(data["indexing"]) + ftInfo.InvertedSzMB = internal.ToFloat(data["inverted_sz_mb"]) + ftInfo.KeyTableSizeMB = internal.ToFloat(data["key_table_size_mb"]) + ftInfo.MaxDocID = internal.ToInteger(data["max_doc_id"]) + ftInfo.NumDocs = internal.ToInteger(data["num_docs"]) + ftInfo.NumRecords = internal.ToInteger(data["num_records"]) + ftInfo.NumTerms = internal.ToInteger(data["num_terms"]) + ftInfo.NumberOfUses = internal.ToInteger(data["number_of_uses"]) + ftInfo.OffsetBitsPerRecordAvg = internal.ToString(data["offset_bits_per_record_avg"]) + ftInfo.OffsetVectorsSzMB = internal.ToFloat(data["offset_vectors_sz_mb"]) + ftInfo.OffsetsPerTermAvg = internal.ToString(data["offsets_per_term_avg"]) + ftInfo.PercentIndexed = internal.ToFloat(data["percent_indexed"]) + ftInfo.RecordsPerDocAvg = internal.ToString(data["records_per_doc_avg"]) + ftInfo.SortableValuesSizeMB = internal.ToFloat(data["sortable_values_size_mb"]) + ftInfo.TagOverheadSzMB = internal.ToFloat(data["tag_overhead_sz_mb"]) + ftInfo.TextOverheadSzMB = internal.ToFloat(data["text_overhead_sz_mb"]) + ftInfo.TotalIndexMemorySzMB = internal.ToFloat(data["total_index_memory_sz_mb"]) + ftInfo.TotalIndexingTime = internal.ToInteger(data["total_indexing_time"]) + ftInfo.TotalInvertedIndexBlocks = internal.ToInteger(data["total_inverted_index_blocks"]) + ftInfo.VectorIndexSzMB = internal.ToFloat(data["vector_index_sz_mb"]) + + return ftInfo, nil +} + +type FTInfoCmd struct { + baseCmd + val FTInfoResult +} + +func newFTInfoCmd(ctx context.Context, args ...interface{}) *FTInfoCmd { + return &FTInfoCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FTInfoCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTInfoCmd) SetVal(val FTInfoResult) { + cmd.val = val +} + +func (cmd *FTInfoCmd) Result() (FTInfoResult, error) 
{ + return cmd.val, cmd.err +} + +func (cmd *FTInfoCmd) Val() FTInfoResult { + return cmd.val +} + +func (cmd *FTInfoCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTInfoCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} +func (cmd *FTInfoCmd) readReply(rd *proto.Reader) (err error) { + n, err := rd.ReadMapLen() + if err != nil { + return err + } + + data := make(map[string]interface{}, n) + for i := 0; i < n; i++ { + k, err := rd.ReadString() + if err != nil { + return err + } + v, err := rd.ReadReply() + if err != nil { + if err == Nil { + data[k] = Nil + continue + } + if err, ok := err.(proto.RedisError); ok { + data[k] = err + continue + } + return err + } + data[k] = v + } + cmd.val, err = parseFTInfo(data) + if err != nil { + return err + } + + return nil +} + +// FTInfo - Retrieves information about an index. +// The 'index' parameter specifies the index to retrieve information about. +// For more information, please refer to the Redis documentation: +// [FT.INFO]: (https://redis.io/commands/ft.info/) +func (c cmdable) FTInfo(ctx context.Context, index string) *FTInfoCmd { + cmd := newFTInfoCmd(ctx, "FT.INFO", index) + _ = c(ctx, cmd) + return cmd +} + +// FTSpellCheck - Checks a query string for spelling errors. +// For more details about spellcheck query please follow: +// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ +// For more information, please refer to the Redis documentation: +// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) +func (c cmdable) FTSpellCheck(ctx context.Context, index string, query string) *FTSpellCheckCmd { + args := []interface{}{"FT.SPELLCHECK", index, query} + cmd := newFTSpellCheckCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTSpellCheckWithArgs - Checks a query string for spelling errors with additional options. 
+// For more details about spellcheck query please follow: +// https://redis.io/docs/interact/search-and-query/advanced-concepts/spellcheck/ +// For more information, please refer to the Redis documentation: +// [FT.SPELLCHECK]: (https://redis.io/commands/ft.spellcheck/) +func (c cmdable) FTSpellCheckWithArgs(ctx context.Context, index string, query string, options *FTSpellCheckOptions) *FTSpellCheckCmd { + args := []interface{}{"FT.SPELLCHECK", index, query} + if options != nil { + if options.Distance > 0 { + args = append(args, "DISTANCE", options.Distance) + } + if options.Terms != nil { + args = append(args, "TERMS", options.Terms.Inclusion, options.Terms.Dictionary) + args = append(args, options.Terms.Terms...) + } + if options.Dialect > 0 { + args = append(args, "DIALECT", options.Dialect) + } + } + cmd := newFTSpellCheckCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +type FTSpellCheckCmd struct { + baseCmd + val []SpellCheckResult +} + +func newFTSpellCheckCmd(ctx context.Context, args ...interface{}) *FTSpellCheckCmd { + return &FTSpellCheckCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FTSpellCheckCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSpellCheckCmd) SetVal(val []SpellCheckResult) { + cmd.val = val +} + +func (cmd *FTSpellCheckCmd) Result() ([]SpellCheckResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSpellCheckCmd) Val() []SpellCheckResult { + return cmd.val +} + +func (cmd *FTSpellCheckCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSpellCheckCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSpellCheckCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = parseFTSpellCheck(data) + if err != nil { + return err + } + return nil +} + +func parseFTSpellCheck(data []interface{}) ([]SpellCheckResult, error) { + results := make([]SpellCheckResult, 
0, len(data)) + + for _, termData := range data { + termInfo, ok := termData.([]interface{}) + if !ok || len(termInfo) != 3 { + return nil, fmt.Errorf("invalid term format") + } + + term, ok := termInfo[1].(string) + if !ok { + return nil, fmt.Errorf("invalid term format") + } + + suggestionsData, ok := termInfo[2].([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid suggestions format") + } + + suggestions := make([]SpellCheckSuggestion, 0, len(suggestionsData)) + for _, suggestionData := range suggestionsData { + suggestionInfo, ok := suggestionData.([]interface{}) + if !ok || len(suggestionInfo) != 2 { + return nil, fmt.Errorf("invalid suggestion format") + } + + scoreStr, ok := suggestionInfo[0].(string) + if !ok { + return nil, fmt.Errorf("invalid suggestion score format") + } + score, err := strconv.ParseFloat(scoreStr, 64) + if err != nil { + return nil, fmt.Errorf("invalid suggestion score value") + } + + suggestion, ok := suggestionInfo[1].(string) + if !ok { + return nil, fmt.Errorf("invalid suggestion format") + } + + suggestions = append(suggestions, SpellCheckSuggestion{ + Score: score, + Suggestion: suggestion, + }) + } + + results = append(results, SpellCheckResult{ + Term: term, + Suggestions: suggestions, + }) + } + + return results, nil +} + +func parseFTSearch(data []interface{}, noContent, withScores, withPayloads, withSortKeys bool) (FTSearchResult, error) { + if len(data) < 1 { + return FTSearchResult{}, fmt.Errorf("unexpected search result format") + } + + total, ok := data[0].(int64) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid total results format") + } + + var results []Document + for i := 1; i < len(data); { + docID, ok := data[i].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid document ID format") + } + + doc := Document{ + ID: docID, + Fields: make(map[string]string), + } + i++ + + if noContent { + results = append(results, doc) + continue + } + + if withScores && i < len(data) { + if scoreStr, ok 
:= data[i].(string); ok { + score, err := strconv.ParseFloat(scoreStr, 64) + if err != nil { + return FTSearchResult{}, fmt.Errorf("invalid score format") + } + doc.Score = &score + i++ + } + } + + if withPayloads && i < len(data) { + if payload, ok := data[i].(string); ok { + doc.Payload = &payload + i++ + } + } + + if withSortKeys && i < len(data) { + if sortKey, ok := data[i].(string); ok { + doc.SortKey = &sortKey + i++ + } + } + + if i < len(data) { + fields, ok := data[i].([]interface{}) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid document fields format") + } + + for j := 0; j < len(fields); j += 2 { + key, ok := fields[j].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid field key format") + } + value, ok := fields[j+1].(string) + if !ok { + return FTSearchResult{}, fmt.Errorf("invalid field value format") + } + doc.Fields[key] = value + } + i++ + } + + results = append(results, doc) + } + return FTSearchResult{ + Total: int(total), + Docs: results, + }, nil +} + +type FTSearchCmd struct { + baseCmd + val FTSearchResult + options *FTSearchOptions +} + +func newFTSearchCmd(ctx context.Context, options *FTSearchOptions, args ...interface{}) *FTSearchCmd { + return &FTSearchCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + options: options, + } +} + +func (cmd *FTSearchCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSearchCmd) SetVal(val FTSearchResult) { + cmd.val = val +} + +func (cmd *FTSearchCmd) Result() (FTSearchResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSearchCmd) Val() FTSearchResult { + return cmd.val +} + +func (cmd *FTSearchCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSearchCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSearchCmd) readReply(rd *proto.Reader) (err error) { + data, err := rd.ReadSlice() + if err != nil { + return err + } + cmd.val, err = parseFTSearch(data, cmd.options.NoContent, 
cmd.options.WithScores, cmd.options.WithPayloads, cmd.options.WithSortKeys) + if err != nil { + return err + } + return nil +} + +// FTSearch - Executes a search query on an index. +// The 'index' parameter specifies the index to search, and the 'query' parameter specifies the search query. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. +// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func (c cmdable) FTSearch(ctx context.Context, index string, query string) *FTSearchCmd { + args := []interface{}{"FT.SEARCH", index, query} + cmd := newFTSearchCmd(ctx, &FTSearchOptions{}, args...) + _ = c(ctx, cmd) + return cmd +} + +type SearchQuery []interface{} + +// FTSearchQuery - Executes a search query on an index with additional options. +// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, +// and the 'options' parameter specifies additional options for the search. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. 
+// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func FTSearchQuery(query string, options *FTSearchOptions) SearchQuery { + queryArgs := []interface{}{query} + if options != nil { + if options.NoContent { + queryArgs = append(queryArgs, "NOCONTENT") + } + if options.Verbatim { + queryArgs = append(queryArgs, "VERBATIM") + } + if options.NoStopWords { + queryArgs = append(queryArgs, "NOSTOPWORDS") + } + if options.WithScores { + queryArgs = append(queryArgs, "WITHSCORES") + } + if options.WithPayloads { + queryArgs = append(queryArgs, "WITHPAYLOADS") + } + if options.WithSortKeys { + queryArgs = append(queryArgs, "WITHSORTKEYS") + } + if options.Filters != nil { + for _, filter := range options.Filters { + queryArgs = append(queryArgs, "FILTER", filter.FieldName, filter.Min, filter.Max) + } + } + if options.GeoFilter != nil { + for _, geoFilter := range options.GeoFilter { + queryArgs = append(queryArgs, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) + } + } + if options.InKeys != nil { + queryArgs = append(queryArgs, "INKEYS", len(options.InKeys)) + queryArgs = append(queryArgs, options.InKeys...) + } + if options.InFields != nil { + queryArgs = append(queryArgs, "INFIELDS", len(options.InFields)) + queryArgs = append(queryArgs, options.InFields...) + } + if options.Return != nil { + queryArgs = append(queryArgs, "RETURN") + queryArgsReturn := []interface{}{} + for _, ret := range options.Return { + queryArgsReturn = append(queryArgsReturn, ret.FieldName) + if ret.As != "" { + queryArgsReturn = append(queryArgsReturn, "AS", ret.As) + } + } + queryArgs = append(queryArgs, len(queryArgsReturn)) + queryArgs = append(queryArgs, queryArgsReturn...) 
+ } + if options.Slop > 0 { + queryArgs = append(queryArgs, "SLOP", options.Slop) + } + if options.Timeout > 0 { + queryArgs = append(queryArgs, "TIMEOUT", options.Timeout) + } + if options.InOrder { + queryArgs = append(queryArgs, "INORDER") + } + if options.Language != "" { + queryArgs = append(queryArgs, "LANGUAGE", options.Language) + } + if options.Expander != "" { + queryArgs = append(queryArgs, "EXPANDER", options.Expander) + } + if options.Scorer != "" { + queryArgs = append(queryArgs, "SCORER", options.Scorer) + } + if options.ExplainScore { + queryArgs = append(queryArgs, "EXPLAINSCORE") + } + if options.Payload != "" { + queryArgs = append(queryArgs, "PAYLOAD", options.Payload) + } + if options.SortBy != nil { + queryArgs = append(queryArgs, "SORTBY") + for _, sortBy := range options.SortBy { + queryArgs = append(queryArgs, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + panic("FT.SEARCH: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + queryArgs = append(queryArgs, "ASC") + } + if sortBy.Desc { + queryArgs = append(queryArgs, "DESC") + } + } + if options.SortByWithCount { + queryArgs = append(queryArgs, "WITHCOUNT") + } + } + if options.LimitOffset >= 0 && options.Limit > 0 { + queryArgs = append(queryArgs, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Params != nil { + queryArgs = append(queryArgs, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + queryArgs = append(queryArgs, key, value) + } + } + if options.DialectVersion > 0 { + queryArgs = append(queryArgs, "DIALECT", options.DialectVersion) + } + } + return queryArgs +} + +// FTSearchWithArgs - Executes a search query on an index with additional options. +// The 'index' parameter specifies the index to search, the 'query' parameter specifies the search query, +// and the 'options' parameter specifies additional options for the search. +// For more information, please refer to the Redis documentation about [FT.SEARCH]. 
+// +// [FT.SEARCH]: (https://redis.io/commands/ft.search/) +func (c cmdable) FTSearchWithArgs(ctx context.Context, index string, query string, options *FTSearchOptions) *FTSearchCmd { + args := []interface{}{"FT.SEARCH", index, query} + if options != nil { + if options.NoContent { + args = append(args, "NOCONTENT") + } + if options.Verbatim { + args = append(args, "VERBATIM") + } + if options.NoStopWords { + args = append(args, "NOSTOPWORDS") + } + if options.WithScores { + args = append(args, "WITHSCORES") + } + if options.WithPayloads { + args = append(args, "WITHPAYLOADS") + } + if options.WithSortKeys { + args = append(args, "WITHSORTKEYS") + } + if options.Filters != nil { + for _, filter := range options.Filters { + args = append(args, "FILTER", filter.FieldName, filter.Min, filter.Max) + } + } + if options.GeoFilter != nil { + for _, geoFilter := range options.GeoFilter { + args = append(args, "GEOFILTER", geoFilter.FieldName, geoFilter.Longitude, geoFilter.Latitude, geoFilter.Radius, geoFilter.Unit) + } + } + if options.InKeys != nil { + args = append(args, "INKEYS", len(options.InKeys)) + args = append(args, options.InKeys...) + } + if options.InFields != nil { + args = append(args, "INFIELDS", len(options.InFields)) + args = append(args, options.InFields...) + } + if options.Return != nil { + args = append(args, "RETURN") + argsReturn := []interface{}{} + for _, ret := range options.Return { + argsReturn = append(argsReturn, ret.FieldName) + if ret.As != "" { + argsReturn = append(argsReturn, "AS", ret.As) + } + } + args = append(args, len(argsReturn)) + args = append(args, argsReturn...) 
+ } + if options.Slop > 0 { + args = append(args, "SLOP", options.Slop) + } + if options.Timeout > 0 { + args = append(args, "TIMEOUT", options.Timeout) + } + if options.InOrder { + args = append(args, "INORDER") + } + if options.Language != "" { + args = append(args, "LANGUAGE", options.Language) + } + if options.Expander != "" { + args = append(args, "EXPANDER", options.Expander) + } + if options.Scorer != "" { + args = append(args, "SCORER", options.Scorer) + } + if options.ExplainScore { + args = append(args, "EXPLAINSCORE") + } + if options.Payload != "" { + args = append(args, "PAYLOAD", options.Payload) + } + if options.SortBy != nil { + args = append(args, "SORTBY") + for _, sortBy := range options.SortBy { + args = append(args, sortBy.FieldName) + if sortBy.Asc && sortBy.Desc { + panic("FT.SEARCH: ASC and DESC are mutually exclusive") + } + if sortBy.Asc { + args = append(args, "ASC") + } + if sortBy.Desc { + args = append(args, "DESC") + } + } + if options.SortByWithCount { + args = append(args, "WITHCOUNT") + } + } + if options.LimitOffset >= 0 && options.Limit > 0 { + args = append(args, "LIMIT", options.LimitOffset, options.Limit) + } + if options.Params != nil { + args = append(args, "PARAMS", len(options.Params)*2) + for key, value := range options.Params { + args = append(args, key, value) + } + } + if options.DialectVersion > 0 { + args = append(args, "DIALECT", options.DialectVersion) + } + } + cmd := newFTSearchCmd(ctx, options, args...) 
+ _ = c(ctx, cmd) + return cmd +} + +func NewFTSynDumpCmd(ctx context.Context, args ...interface{}) *FTSynDumpCmd { + return &FTSynDumpCmd{ + baseCmd: baseCmd{ + ctx: ctx, + args: args, + }, + } +} + +func (cmd *FTSynDumpCmd) String() string { + return cmdString(cmd, cmd.val) +} + +func (cmd *FTSynDumpCmd) SetVal(val []FTSynDumpResult) { + cmd.val = val +} + +func (cmd *FTSynDumpCmd) Val() []FTSynDumpResult { + return cmd.val +} + +func (cmd *FTSynDumpCmd) Result() ([]FTSynDumpResult, error) { + return cmd.val, cmd.err +} + +func (cmd *FTSynDumpCmd) RawVal() interface{} { + return cmd.rawVal +} + +func (cmd *FTSynDumpCmd) RawResult() (interface{}, error) { + return cmd.rawVal, cmd.err +} + +func (cmd *FTSynDumpCmd) readReply(rd *proto.Reader) error { + termSynonymPairs, err := rd.ReadSlice() + if err != nil { + return err + } + + var results []FTSynDumpResult + for i := 0; i < len(termSynonymPairs); i += 2 { + term, ok := termSynonymPairs[i].(string) + if !ok { + return fmt.Errorf("invalid term format") + } + + synonyms, ok := termSynonymPairs[i+1].([]interface{}) + if !ok { + return fmt.Errorf("invalid synonyms format") + } + + synonymList := make([]string, len(synonyms)) + for j, syn := range synonyms { + synonym, ok := syn.(string) + if !ok { + return fmt.Errorf("invalid synonym format") + } + synonymList[j] = synonym + } + + results = append(results, FTSynDumpResult{ + Term: term, + Synonyms: synonymList, + }) + } + + cmd.val = results + return nil +} + +// FTSynDump - Dumps the contents of a synonym group. +// The 'index' parameter specifies the index to dump. +// For more information, please refer to the Redis documentation: +// [FT.SYNDUMP]: (https://redis.io/commands/ft.syndump/) +func (c cmdable) FTSynDump(ctx context.Context, index string) *FTSynDumpCmd { + cmd := NewFTSynDumpCmd(ctx, "FT.SYNDUMP", index) + _ = c(ctx, cmd) + return cmd +} + +// FTSynUpdate - Creates or updates a synonym group with additional terms. 
+// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, and the 'terms' parameter specifies the additional terms. +// For more information, please refer to the Redis documentation: +// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) +func (c cmdable) FTSynUpdate(ctx context.Context, index string, synGroupId interface{}, terms []interface{}) *StatusCmd { + args := []interface{}{"FT.SYNUPDATE", index, synGroupId} + args = append(args, terms...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTSynUpdateWithArgs - Creates or updates a synonym group with additional terms and options. +// The 'index' parameter specifies the index to update, the 'synGroupId' parameter specifies the synonym group id, the 'options' parameter specifies additional options for the update, and the 'terms' parameter specifies the additional terms. +// For more information, please refer to the Redis documentation: +// [FT.SYNUPDATE]: (https://redis.io/commands/ft.synupdate/) +func (c cmdable) FTSynUpdateWithArgs(ctx context.Context, index string, synGroupId interface{}, options *FTSynUpdateOptions, terms []interface{}) *StatusCmd { + args := []interface{}{"FT.SYNUPDATE", index, synGroupId} + if options.SkipInitialScan { + args = append(args, "SKIPINITIALSCAN") + } + args = append(args, terms...) + cmd := NewStatusCmd(ctx, args...) + _ = c(ctx, cmd) + return cmd +} + +// FTTagVals - Returns all distinct values indexed in a tag field. +// The 'index' parameter specifies the index to check, and the 'field' parameter specifies the tag field to retrieve values from. 
+// For more information, please refer to the Redis documentation: +// [FT.TAGVALS]: (https://redis.io/commands/ft.tagvals/) +func (c cmdable) FTTagVals(ctx context.Context, index string, field string) *StringSliceCmd { + cmd := NewStringSliceCmd(ctx, "FT.TAGVALS", index, field) + _ = c(ctx, cmd) + return cmd +} + +// type FTProfileResult struct { +// Results []interface{} +// Profile ProfileDetails +// } + +// type ProfileDetails struct { +// TotalProfileTime string +// ParsingTime string +// PipelineCreationTime string +// Warning string +// IteratorsProfile []IteratorProfile +// ResultProcessorsProfile []ResultProcessorProfile +// } + +// type IteratorProfile struct { +// Type string +// QueryType string +// Time interface{} +// Counter int +// Term string +// Size int +// ChildIterators []IteratorProfile +// } + +// type ResultProcessorProfile struct { +// Type string +// Time interface{} +// Counter int +// } + +// func parseFTProfileResult(data []interface{}) (FTProfileResult, error) { +// var result FTProfileResult +// if len(data) < 2 { +// return result, fmt.Errorf("unexpected data length") +// } + +// // Parse results +// result.Results = data[0].([]interface{}) + +// // Parse profile details +// profileData := data[1].([]interface{}) +// profileDetails := ProfileDetails{} +// for i := 0; i < len(profileData); i += 2 { +// switch profileData[i].(string) { +// case "Total profile time": +// profileDetails.TotalProfileTime = profileData[i+1].(string) +// case "Parsing time": +// profileDetails.ParsingTime = profileData[i+1].(string) +// case "Pipeline creation time": +// profileDetails.PipelineCreationTime = profileData[i+1].(string) +// case "Warning": +// profileDetails.Warning = profileData[i+1].(string) +// case "Iterators profile": +// profileDetails.IteratorsProfile = parseIteratorsProfile(profileData[i+1].([]interface{})) +// case "Result processors profile": +// profileDetails.ResultProcessorsProfile = 
parseResultProcessorsProfile(profileData[i+1].([]interface{})) +// } +// } + +// result.Profile = profileDetails +// return result, nil +// } + +// func parseIteratorsProfile(data []interface{}) []IteratorProfile { +// var iterators []IteratorProfile +// for _, item := range data { +// profile := item.([]interface{}) +// iterator := IteratorProfile{} +// for i := 0; i < len(profile); i += 2 { +// switch profile[i].(string) { +// case "Type": +// iterator.Type = profile[i+1].(string) +// case "Query type": +// iterator.QueryType = profile[i+1].(string) +// case "Time": +// iterator.Time = profile[i+1] +// case "Counter": +// iterator.Counter = int(profile[i+1].(int64)) +// case "Term": +// iterator.Term = profile[i+1].(string) +// case "Size": +// iterator.Size = int(profile[i+1].(int64)) +// case "Child iterators": +// iterator.ChildIterators = parseChildIteratorsProfile(profile[i+1].([]interface{})) +// } +// } +// iterators = append(iterators, iterator) +// } +// return iterators +// } + +// func parseChildIteratorsProfile(data []interface{}) []IteratorProfile { +// var iterators []IteratorProfile +// for _, item := range data { +// profile := item.([]interface{}) +// iterator := IteratorProfile{} +// for i := 0; i < len(profile); i += 2 { +// switch profile[i].(string) { +// case "Type": +// iterator.Type = profile[i+1].(string) +// case "Query type": +// iterator.QueryType = profile[i+1].(string) +// case "Time": +// iterator.Time = profile[i+1] +// case "Counter": +// iterator.Counter = int(profile[i+1].(int64)) +// case "Term": +// iterator.Term = profile[i+1].(string) +// case "Size": +// iterator.Size = int(profile[i+1].(int64)) +// } +// } +// iterators = append(iterators, iterator) +// } +// return iterators +// } + +// func parseResultProcessorsProfile(data []interface{}) []ResultProcessorProfile { +// var processors []ResultProcessorProfile +// for _, item := range data { +// profile := item.([]interface{}) +// processor := ResultProcessorProfile{} +// 
for i := 0; i < len(profile); i += 2 { +// switch profile[i].(string) { +// case "Type": +// processor.Type = profile[i+1].(string) +// case "Time": +// processor.Time = profile[i+1] +// case "Counter": +// processor.Counter = int(profile[i+1].(int64)) +// } +// } +// processors = append(processors, processor) +// } +// return processors +// } + +// func NewFTProfileCmd(ctx context.Context, args ...interface{}) *FTProfileCmd { +// return &FTProfileCmd{ +// baseCmd: baseCmd{ +// ctx: ctx, +// args: args, +// }, +// } +// } + +// type FTProfileCmd struct { +// baseCmd +// val FTProfileResult +// } + +// func (cmd *FTProfileCmd) String() string { +// return cmdString(cmd, cmd.val) +// } + +// func (cmd *FTProfileCmd) SetVal(val FTProfileResult) { +// cmd.val = val +// } + +// func (cmd *FTProfileCmd) Result() (FTProfileResult, error) { +// return cmd.val, cmd.err +// } + +// func (cmd *FTProfileCmd) Val() FTProfileResult { +// return cmd.val +// } + +// func (cmd *FTProfileCmd) readReply(rd *proto.Reader) (err error) { +// data, err := rd.ReadSlice() +// if err != nil { +// return err +// } +// cmd.val, err = parseFTProfileResult(data) +// if err != nil { +// cmd.err = err +// } +// return nil +// } + +// // FTProfile - Executes a search query and returns a profile of how the query was processed. +// // The 'index' parameter specifies the index to search, the 'limited' parameter specifies whether to limit the results, +// // and the 'query' parameter specifies the search / aggreagte query. Please notice that you must either pass a SearchQuery or an AggregateQuery. 
+// // For more information, please refer to the Redis documentation: +// // [FT.PROFILE]: (https://redis.io/commands/ft.profile/) +// func (c cmdable) FTProfile(ctx context.Context, index string, limited bool, query interface{}) *FTProfileCmd { +// queryType := "" +// var argsQuery []interface{} + +// switch v := query.(type) { +// case AggregateQuery: +// queryType = "AGGREGATE" +// argsQuery = v +// case SearchQuery: +// queryType = "SEARCH" +// argsQuery = v +// default: +// panic("FT.PROFILE: query must be either AggregateQuery or SearchQuery") +// } + +// args := []interface{}{"FT.PROFILE", index, queryType} + +// if limited { +// args = append(args, "LIMITED") +// } +// args = append(args, "QUERY") +// args = append(args, argsQuery...) + +// cmd := NewFTProfileCmd(ctx, args...) +// _ = c(ctx, cmd) +// return cmd +// } diff --git a/vendor/github.com/redis/go-redis/v9/sentinel.go b/vendor/github.com/redis/go-redis/v9/sentinel.go index 188f88494..315695544 100644 --- a/vendor/github.com/redis/go-redis/v9/sentinel.go +++ b/vendor/github.com/redis/go-redis/v9/sentinel.go @@ -82,6 +82,7 @@ type FailoverOptions struct { DisableIndentity bool IdentitySuffix string + UnstableResp3 bool } func (opt *FailoverOptions) clientOptions() *Options { @@ -119,6 +120,7 @@ func (opt *FailoverOptions) clientOptions() *Options { DisableIndentity: opt.DisableIndentity, IdentitySuffix: opt.IdentitySuffix, + UnstableResp3: opt.UnstableResp3, } } @@ -156,6 +158,7 @@ func (opt *FailoverOptions) sentinelOptions(addr string) *Options { DisableIndentity: opt.DisableIndentity, IdentitySuffix: opt.IdentitySuffix, + UnstableResp3: opt.UnstableResp3, } } diff --git a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go index 6f1b2fa45..82d8cdfcf 100644 --- a/vendor/github.com/redis/go-redis/v9/timeseries_commands.go +++ b/vendor/github.com/redis/go-redis/v9/timeseries_commands.go @@ -40,25 +40,32 @@ type TimeseriesCmdable 
interface { } type TSOptions struct { - Retention int - ChunkSize int - Encoding string - DuplicatePolicy string - Labels map[string]string + Retention int + ChunkSize int + Encoding string + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSIncrDecrOptions struct { - Timestamp int64 - Retention int - ChunkSize int - Uncompressed bool - Labels map[string]string + Timestamp int64 + Retention int + ChunkSize int + Uncompressed bool + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSAlterOptions struct { - Retention int - ChunkSize int - DuplicatePolicy string - Labels map[string]string + Retention int + ChunkSize int + DuplicatePolicy string + Labels map[string]string + IgnoreMaxTimeDiff int64 + IgnoreMaxValDiff float64 } type TSCreateRuleOptions struct { @@ -223,6 +230,9 @@ func (c cmdable) TSAddWithArgs(ctx context.Context, key string, timestamp interf args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) @@ -264,6 +274,9 @@ func (c cmdable) TSCreateWithArgs(ctx context.Context, key string, options *TSOp args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewStatusCmd(ctx, args...) _ = c(ctx, cmd) @@ -292,6 +305,9 @@ func (c cmdable) TSAlter(ctx context.Context, key string, options *TSAlterOption args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewStatusCmd(ctx, args...) 
_ = c(ctx, cmd) @@ -351,12 +367,18 @@ func (c cmdable) TSIncrByWithArgs(ctx context.Context, key string, timestamp flo if options.Uncompressed { args = append(args, "UNCOMPRESSED") } + if options.DuplicatePolicy != "" { + args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy) + } if options.Labels != nil { args = append(args, "LABELS") for label, value := range options.Labels { args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) @@ -391,12 +413,18 @@ func (c cmdable) TSDecrByWithArgs(ctx context.Context, key string, timestamp flo if options.Uncompressed { args = append(args, "UNCOMPRESSED") } + if options.DuplicatePolicy != "" { + args = append(args, "DUPLICATE_POLICY", options.DuplicatePolicy) + } if options.Labels != nil { args = append(args, "LABELS") for label, value := range options.Labels { args = append(args, label, value) } } + if options.IgnoreMaxTimeDiff != 0 || options.IgnoreMaxValDiff != 0 { + args = append(args, "IGNORE", options.IgnoreMaxTimeDiff, options.IgnoreMaxValDiff) + } } cmd := NewIntCmd(ctx, args...) _ = c(ctx, cmd) diff --git a/vendor/github.com/redis/go-redis/v9/universal.go b/vendor/github.com/redis/go-redis/v9/universal.go index 275bef3d6..47fda2769 100644 --- a/vendor/github.com/redis/go-redis/v9/universal.go +++ b/vendor/github.com/redis/go-redis/v9/universal.go @@ -68,6 +68,7 @@ type UniversalOptions struct { DisableIndentity bool IdentitySuffix string + UnstableResp3 bool } // Cluster returns cluster options created from the universal options. 
@@ -114,6 +115,7 @@ func (o *UniversalOptions) Cluster() *ClusterOptions { DisableIndentity: o.DisableIndentity, IdentitySuffix: o.IdentitySuffix, + UnstableResp3: o.UnstableResp3, } } @@ -160,6 +162,7 @@ func (o *UniversalOptions) Failover() *FailoverOptions { DisableIndentity: o.DisableIndentity, IdentitySuffix: o.IdentitySuffix, + UnstableResp3: o.UnstableResp3, } } @@ -203,6 +206,7 @@ func (o *UniversalOptions) Simple() *Options { DisableIndentity: o.DisableIndentity, IdentitySuffix: o.IdentitySuffix, + UnstableResp3: o.UnstableResp3, } } diff --git a/vendor/github.com/redis/go-redis/v9/version.go b/vendor/github.com/redis/go-redis/v9/version.go index b1234dac3..a447a546d 100644 --- a/vendor/github.com/redis/go-redis/v9/version.go +++ b/vendor/github.com/redis/go-redis/v9/version.go @@ -2,5 +2,5 @@ package redis // Version is the current release version. func Version() string { - return "9.6.1" + return "9.7.1" } diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 781770c20..48dbb9d84 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -5,15 +5,6 @@ oauth2 package contains a client implementation for OAuth 2.0 spec. -## Installation - -~~~~ -go get golang.org/x/oauth2 -~~~~ - -Or you can manually git clone the repository to -`$(go env GOPATH)/src/golang.org/x/oauth2`. - See pkg.go.dev for further documentation and examples. * [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) @@ -33,7 +24,11 @@ The main issue tracker for the oauth2 repository is located at https://github.com/golang/oauth2/issues. This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. In particular: +this repository, see https://go.dev/doc/contribute. + +The git repository is https://go.googlesource.com/oauth2. 
+ +Note: * Excluding trivial changes, all contributions should be connected to an existing issue. * API changes must go through the [change proposal process](https://go.dev/s/proposal-process) before they can be accepted. diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go new file mode 100644 index 000000000..fd8632444 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/key.go @@ -0,0 +1,205 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +// Package registry provides access to the Windows registry. +// +// Here is a simple example, opening a registry key and reading a string value from it. +// +// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) +// if err != nil { +// log.Fatal(err) +// } +// defer k.Close() +// +// s, _, err := k.GetStringValue("SystemRoot") +// if err != nil { +// log.Fatal(err) +// } +// fmt.Printf("Windows system root is %q\n", s) +package registry + +import ( + "io" + "runtime" + "syscall" + "time" +) + +const ( + // Registry key security and access rights. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx + // for details. + ALL_ACCESS = 0xf003f + CREATE_LINK = 0x00020 + CREATE_SUB_KEY = 0x00004 + ENUMERATE_SUB_KEYS = 0x00008 + EXECUTE = 0x20019 + NOTIFY = 0x00010 + QUERY_VALUE = 0x00001 + READ = 0x20019 + SET_VALUE = 0x00002 + WOW64_32KEY = 0x00200 + WOW64_64KEY = 0x00100 + WRITE = 0x20006 +) + +// Key is a handle to an open Windows registry key. +// Keys can be obtained by calling OpenKey; there are +// also some predefined root keys such as CURRENT_USER. +// Keys can be used directly in the Windows API. +type Key syscall.Handle + +const ( + // Windows defines some predefined root keys that are always open. 
+ // An application can use these keys as entry points to the registry. + // Normally these keys are used in OpenKey to open new keys, + // but they can also be used anywhere a Key is required. + CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT) + CURRENT_USER = Key(syscall.HKEY_CURRENT_USER) + LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE) + USERS = Key(syscall.HKEY_USERS) + CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG) + PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA) +) + +// Close closes open key k. +func (k Key) Close() error { + return syscall.RegCloseKey(syscall.Handle(k)) +} + +// OpenKey opens a new key with path name relative to key k. +// It accepts any open key, including CURRENT_USER and others, +// and returns the new key and an error. +// The access parameter specifies desired access rights to the +// key to be opened. +func OpenKey(k Key, path string, access uint32) (Key, error) { + p, err := syscall.UTF16PtrFromString(path) + if err != nil { + return 0, err + } + var subkey syscall.Handle + err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey) + if err != nil { + return 0, err + } + return Key(subkey), nil +} + +// OpenRemoteKey opens a predefined registry key on another +// computer pcname. The key to be opened is specified by k, but +// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS. +// If pcname is "", OpenRemoteKey returns local computer key. +func OpenRemoteKey(pcname string, k Key) (Key, error) { + var err error + var p *uint16 + if pcname != "" { + p, err = syscall.UTF16PtrFromString(`\\` + pcname) + if err != nil { + return 0, err + } + } + var remoteKey syscall.Handle + err = regConnectRegistry(p, syscall.Handle(k), &remoteKey) + if err != nil { + return 0, err + } + return Key(remoteKey), nil +} + +// ReadSubKeyNames returns the names of subkeys of key k. +// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. 
+func (k Key) ReadSubKeyNames(n int) ([]string, error) { + // RegEnumKeyEx must be called repeatedly and to completion. + // During this time, this goroutine cannot migrate away from + // its current thread. See https://golang.org/issue/49320 and + // https://golang.org/issue/49466. + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + names := make([]string, 0) + // Registry key size limit is 255 bytes and described there: + // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx + buf := make([]uint16, 256) //plus extra room for terminating zero byte +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} + +// CreateKey creates a key named path under open key k. +// CreateKey returns the new key and a boolean flag that reports +// whether the key already existed. +// The access parameter specifies the access rights for the key +// to be created. +func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) { + var h syscall.Handle + var d uint32 + err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path), + 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d) + if err != nil { + return 0, false, err + } + return Key(h), d == _REG_OPENED_EXISTING_KEY, nil +} + +// DeleteKey deletes the subkey path of key k and its values. 
+func DeleteKey(k Key, path string) error { + return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path)) +} + +// A KeyInfo describes the statistics of a key. It is returned by Stat. +type KeyInfo struct { + SubKeyCount uint32 + MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte + ValueCount uint32 + MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte + MaxValueLen uint32 // longest data component among the key's values, in bytes + lastWriteTime syscall.Filetime +} + +// ModTime returns the key's last write time. +func (ki *KeyInfo) ModTime() time.Time { + return time.Unix(0, ki.lastWriteTime.Nanoseconds()) +} + +// Stat retrieves information about the open key k. +func (k Key) Stat() (*KeyInfo, error) { + var ki KeyInfo + err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil, + &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount, + &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime) + if err != nil { + return nil, err + } + return &ki, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go new file mode 100644 index 000000000..bbf86ccf0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go @@ -0,0 +1,9 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build generate + +package registry + +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go new file mode 100644 index 000000000..f533091c1 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/syscall.go @@ -0,0 +1,32 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import "syscall" + +const ( + _REG_OPTION_NON_VOLATILE = 0 + + _REG_CREATED_NEW_KEY = 1 + _REG_OPENED_EXISTING_KEY = 2 + + _ERROR_NO_MORE_ITEMS syscall.Errno = 259 +) + +func LoadRegLoadMUIString() error { + return procRegLoadMUIStringW.Find() +} + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW +//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW +//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW +//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW +//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW +//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW +//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW + +//sys 
expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go new file mode 100644 index 000000000..74db26b94 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/value.go @@ -0,0 +1,386 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package registry + +import ( + "errors" + "io" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + // Registry value types. + NONE = 0 + SZ = 1 + EXPAND_SZ = 2 + BINARY = 3 + DWORD = 4 + DWORD_BIG_ENDIAN = 5 + LINK = 6 + MULTI_SZ = 7 + RESOURCE_LIST = 8 + FULL_RESOURCE_DESCRIPTOR = 9 + RESOURCE_REQUIREMENTS_LIST = 10 + QWORD = 11 +) + +var ( + // ErrShortBuffer is returned when the buffer was too short for the operation. + ErrShortBuffer = syscall.ERROR_MORE_DATA + + // ErrNotExist is returned when a registry key or value does not exist. + ErrNotExist = syscall.ERROR_FILE_NOT_FOUND + + // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected. + ErrUnexpectedType = errors.New("unexpected key value type") +) + +// GetValue retrieves the type and data for the specified value associated +// with an open key k. It fills up buffer buf and returns the retrieved +// byte count n. If buf is too small to fit the stored value it returns +// ErrShortBuffer error along with the required buffer size n. +// If no buffer is provided, it returns true and actual buffer size n. +// If no buffer is provided, GetValue returns the value's type only. +// If the value does not exist, the error returned is ErrNotExist. +// +// GetValue is a low level function. If value's type is known, use the appropriate +// Get*Value function instead. 
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return 0, 0, err + } + var pbuf *byte + if len(buf) > 0 { + pbuf = (*byte)(unsafe.Pointer(&buf[0])) + } + l := uint32(len(buf)) + err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l) + if err != nil { + return int(l), valtype, err + } + return int(l), valtype, nil +} + +func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return nil, 0, err + } + var t uint32 + n := uint32(len(buf)) + for { + err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n) + if err == nil { + return buf[:n], t, nil + } + if err != syscall.ERROR_MORE_DATA { + return nil, 0, err + } + if n <= uint32(len(buf)) { + return nil, 0, err + } + buf = make([]byte, n) + } +} + +// GetStringValue retrieves the string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringValue returns ErrNotExist. +// If value is not SZ or EXPAND_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return "", typ, err2 + } + switch typ { + case SZ, EXPAND_SZ: + default: + return "", typ, ErrUnexpectedType + } + if len(data) == 0 { + return "", typ, nil + } + u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + return syscall.UTF16ToString(u), typ, nil +} + +// GetMUIStringValue retrieves the localized string value for +// the specified value name associated with an open key k. +// If the value name doesn't exist or the localized string value +// can't be resolved, GetMUIStringValue returns ErrNotExist. 
+// GetMUIStringValue panics if the system doesn't support +// regLoadMUIString; use LoadRegLoadMUIString to check if +// regLoadMUIString is supported before calling this function. +func (k Key) GetMUIStringValue(name string) (string, error) { + pname, err := syscall.UTF16PtrFromString(name) + if err != nil { + return "", err + } + + buf := make([]uint16, 1024) + var buflen uint32 + var pdir *uint16 + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path + + // Try to resolve the string value using the system directory as + // a DLL search path; this assumes the string value is of the form + // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320. + + // This approach works with tzres.dll but may have to be revised + // in the future to allow callers to provide custom search paths. + + var s string + s, err = ExpandString("%SystemRoot%\\system32\\") + if err != nil { + return "", err + } + pdir, err = syscall.UTF16PtrFromString(s) + if err != nil { + return "", err + } + + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed + if buflen <= uint32(len(buf)) { + break // Buffer not growing, assume race; break + } + buf = make([]uint16, buflen) + err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir) + } + + if err != nil { + return "", err + } + + return syscall.UTF16ToString(buf), nil +} + +// ExpandString expands environment-variable strings and replaces +// them with the values defined for the current user. +// Use ExpandString to expand EXPAND_SZ strings. 
+func ExpandString(value string) (string, error) { + if value == "" { + return "", nil + } + p, err := syscall.UTF16PtrFromString(value) + if err != nil { + return "", err + } + r := make([]uint16, 100) + for { + n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r))) + if err != nil { + return "", err + } + if n <= uint32(len(r)) { + return syscall.UTF16ToString(r[:n]), nil + } + r = make([]uint16, n) + } +} + +// GetStringsValue retrieves the []string value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetStringsValue returns ErrNotExist. +// If value is not MULTI_SZ, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != MULTI_SZ { + return nil, typ, ErrUnexpectedType + } + if len(data) == 0 { + return nil, typ, nil + } + p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2] + if len(p) == 0 { + return nil, typ, nil + } + if p[len(p)-1] == 0 { + p = p[:len(p)-1] // remove terminating null + } + val = make([]string, 0, 5) + from := 0 + for i, c := range p { + if c == 0 { + val = append(val, string(utf16.Decode(p[from:i]))) + from = i + 1 + } + } + return val, typ, nil +} + +// GetIntegerValue retrieves the integer value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetIntegerValue returns ErrNotExist. +// If value is not DWORD or QWORD, it will return the correct value +// type and ErrUnexpectedType. 
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 8)) + if err2 != nil { + return 0, typ, err2 + } + switch typ { + case DWORD: + if len(data) != 4 { + return 0, typ, errors.New("DWORD value is not 4 bytes long") + } + var val32 uint32 + copy((*[4]byte)(unsafe.Pointer(&val32))[:], data) + return uint64(val32), DWORD, nil + case QWORD: + if len(data) != 8 { + return 0, typ, errors.New("QWORD value is not 8 bytes long") + } + copy((*[8]byte)(unsafe.Pointer(&val))[:], data) + return val, QWORD, nil + default: + return 0, typ, ErrUnexpectedType + } +} + +// GetBinaryValue retrieves the binary value for the specified +// value name associated with an open key k. It also returns the value's type. +// If value does not exist, GetBinaryValue returns ErrNotExist. +// If value is not BINARY, it will return the correct value +// type and ErrUnexpectedType. +func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) { + data, typ, err2 := k.getValue(name, make([]byte, 64)) + if err2 != nil { + return nil, typ, err2 + } + if typ != BINARY { + return nil, typ, ErrUnexpectedType + } + return data, typ, nil +} + +func (k Key) setValue(name string, valtype uint32, data []byte) error { + p, err := syscall.UTF16PtrFromString(name) + if err != nil { + return err + } + if len(data) == 0 { + return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0) + } + return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data))) +} + +// SetDWordValue sets the data and type of a name value +// under key k to value and DWORD. +func (k Key) SetDWordValue(name string, value uint32) error { + return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:]) +} + +// SetQWordValue sets the data and type of a name value +// under key k to value and QWORD. 
+func (k Key) SetQWordValue(name string, value uint64) error { + return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:]) +} + +func (k Key) setStringValue(name string, valtype uint32, value string) error { + v, err := syscall.UTF16FromString(value) + if err != nil { + return err + } + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, valtype, buf) +} + +// SetStringValue sets the data and type of a name value +// under key k to value and SZ. The value must not contain a zero byte. +func (k Key) SetStringValue(name, value string) error { + return k.setStringValue(name, SZ, value) +} + +// SetExpandStringValue sets the data and type of a name value +// under key k to value and EXPAND_SZ. The value must not contain a zero byte. +func (k Key) SetExpandStringValue(name, value string) error { + return k.setStringValue(name, EXPAND_SZ, value) +} + +// SetStringsValue sets the data and type of a name value +// under key k to value and MULTI_SZ. The value strings +// must not contain a zero byte. +func (k Key) SetStringsValue(name string, value []string) error { + ss := "" + for _, s := range value { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return errors.New("string cannot have 0 inside") + } + } + ss += s + "\x00" + } + v := utf16.Encode([]rune(ss + "\x00")) + buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2] + return k.setValue(name, MULTI_SZ, buf) +} + +// SetBinaryValue sets the data and type of a name value +// under key k to value and BINARY. +func (k Key) SetBinaryValue(name string, value []byte) error { + return k.setValue(name, BINARY, value) +} + +// DeleteValue removes a named value from the key k. +func (k Key) DeleteValue(name string) error { + return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name)) +} + +// ReadValueNames returns the value names of key k. 
+// The parameter n controls the number of returned names, +// analogous to the way os.File.Readdirnames works. +func (k Key) ReadValueNames(n int) ([]string, error) { + ki, err := k.Stat() + if err != nil { + return nil, err + } + names := make([]string, 0, ki.ValueCount) + buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character +loopItems: + for i := uint32(0); ; i++ { + if n > 0 { + if len(names) == n { + return names, nil + } + } + l := uint32(len(buf)) + for { + err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil) + if err == nil { + break + } + if err == syscall.ERROR_MORE_DATA { + // Double buffer size and try again. + l = uint32(2 * len(buf)) + buf = make([]uint16, l) + continue + } + if err == _ERROR_NO_MORE_ITEMS { + break loopItems + } + return names, err + } + names = append(names, syscall.UTF16ToString(buf[:l])) + } + if n > len(names) { + return names, io.EOF + } + return names, nil +} diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go new file mode 100644 index 000000000..fc1835d8a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go @@ -0,0 +1,117 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package registry + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) + errERROR_EINVAL error = syscall.EINVAL +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return errERROR_EINVAL + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. 
(perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW") + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") + procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW") + procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW") + procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW") + procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW") + procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW") + procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") +) + +func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegConnectRegistryW.Addr(), 3, uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteKeyW.Addr(), 2, uintptr(key), uintptr(unsafe.Pointer(subkey)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegDeleteValueW.Addr(), 2, 
uintptr(key), uintptr(unsafe.Pointer(name)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumValueW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegLoadMUIStringW.Addr(), 7, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegSetValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize)) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size)) + n = uint32(r0) + if n == 0 { + err = errnoErr(e1) + } + return +} diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go index 0d9a824ce..e524fdd40 100644 --- 
a/vendor/google.golang.org/grpc/grpclog/internal/logger.go +++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go @@ -81,7 +81,7 @@ func (l *LoggerWrapper) Errorf(format string, args ...any) { } // V reports whether verbosity level l is at least the requested verbose level. -func (*LoggerWrapper) V(l int) bool { +func (*LoggerWrapper) V(int) bool { // Returns true for all verbose level. return true } diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index 73fa407b6..20b4dc3d3 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -183,7 +183,7 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. - GRPCResolverSchemeExtraMetadata string = "xds" + GRPCResolverSchemeExtraMetadata = "xds" // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. EnterIdleModeForTesting any // func(*grpc.ClientConn) @@ -191,6 +191,8 @@ var ( // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. ExitIdleModeForTesting any // func(*grpc.ClientConn) error + // ChannelzTurnOffForTesting disables the Channelz service for testing + // purposes. ChannelzTurnOffForTesting func() // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to @@ -203,11 +205,7 @@ var ( // UserSetDefaultScheme is set to true if the user has overridden the // default resolver scheme. - UserSetDefaultScheme bool = false - - // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n - // is the number of elements. swap swaps the elements with indexes i and j. - ShuffleAddressListForTesting any // func(n int, swap func(i, j int)) + UserSetDefaultScheme = false // ConnectedAddress returns the connected address for a SubConnState. The // address is only valid if the state is READY. 
@@ -235,7 +233,7 @@ var ( // // The implementation is expected to create a health checking RPC stream by // calling newStream(), watch for the health status of serviceName, and report -// it's health back by calling setConnectivityState(). +// its health back by calling setConnectivityState(). // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index c7dbc8205..1186f1e9a 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) { // s.Code() != OK implies that s.Proto() != nil. p := s.Proto() for _, detail := range details { - any, err := anypb.New(protoadapt.MessageV2Of(detail)) + m, err := anypb.New(protoadapt.MessageV2Of(detail)) if err != nil { return nil, err } - p.Details = append(p.Details, any) + p.Details = append(p.Details, m) } return &Status{s: p}, nil } // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. +// If the detail can be decoded, the proto message returned is of the same +// type that was given to WithDetails(). func (s *Status) Details() []any { if s == nil || s.s == nil { return nil @@ -160,7 +162,38 @@ func (s *Status) Details() []any { details = append(details, err) continue } - details = append(details, detail) + // The call to MessageV1Of is required to unwrap the proto message if + // it implemented only the MessageV1 API. The proto message would have + // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are + // added to a global registry used by any.UnmarshalNew(). + // MessageV1Of has the following behaviour: + // 1. 
If the given message is a wrapped MessageV1, it returns the + // unwrapped value. + // 2. If the given message already implements MessageV1, it returns it + // as is. + // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper. + // + // Since the Status.WithDetails() API only accepts MessageV1, calling + // MessageV1Of ensures we return the same type that was given to + // WithDetails: + // * If the give type implemented only MessageV1, the unwrapping from + // point 1 above will restore the type. + // * If the given type implemented both MessageV1 and MessageV2, point 2 + // above will ensure no wrapping is performed. + // * If the given type implemented only MessageV2 and was wrapped using + // MessageV1Of before passing to WithDetails(), it would be unwrapped + // in WithDetails by calling MessageV2Of(). Point 3 above will ensure + // that the type is wrapped in a MessageV1 wrapper again before + // returning. Note that protoc-gen-go doesn't generate code which + // implements ONLY MessageV2 at the time of writing. + // + // NOTE: Status details can also be added using the FromProto method. + // This could theoretically allow passing a Detail message that only + // implements the V2 API. In such a case the message will be wrapped in + // a MessageV1 wrapper when fetched using Details(). + // Since protoc-gen-go generates only code that implements both V1 and + // V2 APIs for backward compatibility, this is not a concern. 
+ details = append(details, protoadapt.MessageV1Of(detail)) } return details } diff --git a/vendor/modules.txt b/vendor/modules.txt index 34475d79d..2c908cedb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,5 +1,5 @@ -# cloud.google.com/go/compute/metadata v0.3.0 -## explicit; go 1.19 +# cloud.google.com/go/compute/metadata v0.5.0 +## explicit; go 1.20 cloud.google.com/go/compute/metadata # code.gitea.io/sdk/gitea v0.20.0 ## explicit; go 1.18 @@ -65,8 +65,9 @@ github.com/ProtonMail/go-crypto/openpgp/x448 ## explicit; go 1.22.0 github.com/argoproj-labs/argocd-operator/api/v1beta1 github.com/argoproj-labs/argocd-operator/common -# github.com/argoproj/argo-cd/v2 v2.13.3 +# github.com/argoproj/argo-cd/v2 v2.14.9 ## explicit; go 1.22.0 +github.com/argoproj/argo-cd/v2/assets github.com/argoproj/argo-cd/v2/common github.com/argoproj/argo-cd/v2/pkg/apis/application github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1 @@ -75,9 +76,9 @@ github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/fake github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/scheme github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1 github.com/argoproj/argo-cd/v2/pkg/client/clientset/versioned/typed/application/v1alpha1/fake +github.com/argoproj/argo-cd/v2/util/assets github.com/argoproj/argo-cd/v2/util/cache github.com/argoproj/argo-cd/v2/util/cert -github.com/argoproj/argo-cd/v2/util/collections github.com/argoproj/argo-cd/v2/util/config github.com/argoproj/argo-cd/v2/util/env github.com/argoproj/argo-cd/v2/util/exec @@ -88,11 +89,13 @@ github.com/argoproj/argo-cd/v2/util/http github.com/argoproj/argo-cd/v2/util/io github.com/argoproj/argo-cd/v2/util/io/files github.com/argoproj/argo-cd/v2/util/io/path +github.com/argoproj/argo-cd/v2/util/jwt github.com/argoproj/argo-cd/v2/util/log github.com/argoproj/argo-cd/v2/util/proxy +github.com/argoproj/argo-cd/v2/util/rbac github.com/argoproj/argo-cd/v2/util/regex 
github.com/argoproj/argo-cd/v2/util/security -# github.com/argoproj/gitops-engine v0.7.1-0.20240905010810-bd7681ae3f8b +# github.com/argoproj/gitops-engine v0.7.1-0.20250328191959-6d3cf122b03f ## explicit; go 1.22.0 github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/api/v1/endpoints github.com/argoproj/gitops-engine/internal/kubernetes_vendor/pkg/util/hash @@ -112,14 +115,13 @@ github.com/argoproj/gitops-engine/pkg/utils/tracing github.com/argoproj/pkg/exec github.com/argoproj/pkg/rand github.com/argoproj/pkg/sync -github.com/argoproj/pkg/time # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile # github.com/blang/semver/v4 v4.0.0 ## explicit; go 1.14 github.com/blang/semver/v4 -# github.com/bmatcuk/doublestar/v4 v4.6.1 +# github.com/bmatcuk/doublestar/v4 v4.7.1 ## explicit; go 1.16 github.com/bmatcuk/doublestar/v4 # github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 @@ -127,9 +129,27 @@ github.com/bmatcuk/doublestar/v4 # github.com/bombsimon/logrusr/v2 v2.0.1 ## explicit; go 1.13 github.com/bombsimon/logrusr/v2 -# github.com/bradleyfalzon/ghinstallation/v2 v2.11.0 -## explicit; go 1.16 +# github.com/bradleyfalzon/ghinstallation/v2 v2.12.0 +## explicit; go 1.21 github.com/bradleyfalzon/ghinstallation/v2 +# github.com/casbin/casbin/v2 v2.102.0 +## explicit; go 1.13 +github.com/casbin/casbin/v2 +github.com/casbin/casbin/v2/config +github.com/casbin/casbin/v2/constant +github.com/casbin/casbin/v2/effector +github.com/casbin/casbin/v2/errors +github.com/casbin/casbin/v2/log +github.com/casbin/casbin/v2/model +github.com/casbin/casbin/v2/persist +github.com/casbin/casbin/v2/persist/cache +github.com/casbin/casbin/v2/persist/file-adapter +github.com/casbin/casbin/v2/rbac +github.com/casbin/casbin/v2/rbac/default-role-manager +github.com/casbin/casbin/v2/util +# github.com/casbin/govaluate v1.2.0 +## explicit; go 1.13 +github.com/casbin/govaluate # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 
github.com/cespare/xxhash/v2 @@ -197,9 +217,10 @@ github.com/exponent-io/jsonpath # github.com/fatih/camelcase v1.0.0 ## explicit github.com/fatih/camelcase -# github.com/fsnotify/fsnotify v1.7.0 +# github.com/fsnotify/fsnotify v1.8.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +github.com/fsnotify/fsnotify/internal # github.com/fxamacker/cbor/v2 v2.7.0 ## explicit; go 1.17 github.com/fxamacker/cbor/v2 @@ -309,7 +330,7 @@ github.com/gobwas/glob/util/strings ## explicit; go 1.15 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys -# github.com/golang-jwt/jwt/v4 v4.5.0 +# github.com/golang-jwt/jwt/v4 v4.5.2 ## explicit; go 1.16 github.com/golang-jwt/jwt/v4 # github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 @@ -335,9 +356,9 @@ github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/go-github/v62 v62.0.0 +# github.com/google/go-github/v66 v66.0.0 ## explicit; go 1.21 -github.com/google/go-github/v62/github +github.com/google/go-github/v66/github # github.com/google/go-querystring v1.1.0 ## explicit; go 1.10 github.com/google/go-querystring/query @@ -555,7 +576,7 @@ github.com/pjbgf/sha1cd/ubc # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.20.4 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -575,7 +596,7 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/redis/go-redis/v9 v9.6.1 +# github.com/redis/go-redis/v9 v9.7.1 ## explicit; go 1.18 github.com/redis/go-redis/v9 github.com/redis/go-redis/v9/internal @@ -697,8 +718,8 @@ 
golang.org/x/crypto/ssh golang.org/x/crypto/ssh/agent golang.org/x/crypto/ssh/internal/bcrypt_pbkdf golang.org/x/crypto/ssh/knownhosts -# golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 -## explicit; go 1.20 +# golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f +## explicit; go 1.22.0 golang.org/x/exp/maps # golang.org/x/net v0.35.0 ## explicit; go 1.18 @@ -715,7 +736,7 @@ golang.org/x/net/internal/httpcommon golang.org/x/net/internal/socks golang.org/x/net/proxy golang.org/x/net/websocket -# golang.org/x/oauth2 v0.23.0 +# golang.org/x/oauth2 v0.24.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler @@ -739,6 +760,7 @@ golang.org/x/sys/execabs golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows +golang.org/x/sys/windows/registry # golang.org/x/term v0.29.0 ## explicit; go 1.18 golang.org/x/term @@ -764,7 +786,7 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.7.0 +# golang.org/x/time v0.8.0 ## explicit; go 1.18 golang.org/x/time/rate # golang.org/x/tools v0.28.0 @@ -774,11 +796,11 @@ golang.org/x/tools/go/ast/inspector # gomodules.xyz/jsonpatch/v2 v2.4.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 ## explicit; go 1.21 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.66.2 -## explicit; go 1.21 +# google.golang.org/grpc v1.68.1 +## explicit; go 1.22 google.golang.org/grpc/codes google.golang.org/grpc/connectivity google.golang.org/grpc/grpclog @@ -1557,6 +1579,11 @@ oras.land/oras-go/v2/internal/syncutil oras.land/oras-go/v2/registry oras.land/oras-go/v2/registry/remote oras.land/oras-go/v2/registry/remote/auth +oras.land/oras-go/v2/registry/remote/credentials +oras.land/oras-go/v2/registry/remote/credentials/internal/config 
+oras.land/oras-go/v2/registry/remote/credentials/internal/executer +oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil +oras.land/oras-go/v2/registry/remote/credentials/trace oras.land/oras-go/v2/registry/remote/errcode oras.land/oras-go/v2/registry/remote/internal/errutil oras.land/oras-go/v2/registry/remote/retry @@ -1699,7 +1726,7 @@ sigs.k8s.io/kustomize/kyaml/yaml/merge2 sigs.k8s.io/kustomize/kyaml/yaml/merge3 sigs.k8s.io/kustomize/kyaml/yaml/schema sigs.k8s.io/kustomize/kyaml/yaml/walk -# sigs.k8s.io/structured-merge-diff/v4 v4.4.1 +# sigs.k8s.io/structured-merge-diff/v4 v4.4.4-0.20241211184406-7bf59b3d70ee ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath sigs.k8s.io/structured-merge-diff/v4/merge diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/file_store.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/file_store.go new file mode 100644 index 000000000..7664cc2ab --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/file_store.go @@ -0,0 +1,97 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import ( + "context" + "errors" + "fmt" + "strings" + + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials/internal/config" +) + +// FileStore implements a credentials store using the docker configuration file +// to keep the credentials in plain-text. 
+// +// Reference: https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties +type FileStore struct { + // DisablePut disables putting credentials in plaintext. + // If DisablePut is set to true, Put() will return ErrPlaintextPutDisabled. + DisablePut bool + + config *config.Config +} + +var ( + // ErrPlaintextPutDisabled is returned by Put() when DisablePut is set + // to true. + ErrPlaintextPutDisabled = errors.New("putting plaintext credentials is disabled") + // ErrBadCredentialFormat is returned by Put() when the credential format + // is bad. + ErrBadCredentialFormat = errors.New("bad credential format") +) + +// NewFileStore creates a new file credentials store. +// +// Reference: https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties +func NewFileStore(configPath string) (*FileStore, error) { + cfg, err := config.Load(configPath) + if err != nil { + return nil, err + } + return newFileStore(cfg), nil +} + +// newFileStore creates a file credentials store based on the given config instance. +func newFileStore(cfg *config.Config) *FileStore { + return &FileStore{config: cfg} +} + +// Get retrieves credentials from the store for the given server address. +func (fs *FileStore) Get(_ context.Context, serverAddress string) (auth.Credential, error) { + return fs.config.GetCredential(serverAddress) +} + +// Put saves credentials into the store for the given server address. +// Returns ErrPlaintextPutDisabled if fs.DisablePut is set to true. +func (fs *FileStore) Put(_ context.Context, serverAddress string, cred auth.Credential) error { + if fs.DisablePut { + return ErrPlaintextPutDisabled + } + if err := validateCredentialFormat(cred); err != nil { + return err + } + + return fs.config.PutCredential(serverAddress, cred) +} + +// Delete removes credentials from the store for the given server address. 
+func (fs *FileStore) Delete(_ context.Context, serverAddress string) error { + return fs.config.DeleteCredential(serverAddress) +} + +// validateCredentialFormat validates the format of cred. +func validateCredentialFormat(cred auth.Credential) error { + if strings.ContainsRune(cred.Username, ':') { + // Username and password will be encoded in the base64(username:password) + // format in the file. The decoded result will be wrong if username + // contains colon(s). + return fmt.Errorf("%w: colons(:) are not allowed in username", ErrBadCredentialFormat) + } + return nil +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/config/config.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/config/config.go new file mode 100644 index 000000000..20ee07437 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/config/config.go @@ -0,0 +1,332 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + "sync" + + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil" +) + +const ( + // configFieldAuths is the "auths" field in the config file. 
+ // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L19 + configFieldAuths = "auths" + // configFieldCredentialsStore is the "credsStore" field in the config file. + configFieldCredentialsStore = "credsStore" + // configFieldCredentialHelpers is the "credHelpers" field in the config file. + configFieldCredentialHelpers = "credHelpers" +) + +// ErrInvalidConfigFormat is returned when the config format is invalid. +var ErrInvalidConfigFormat = errors.New("invalid config format") + +// AuthConfig contains authorization information for connecting to a Registry. +// References: +// - https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L17-L45 +// - https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/types/authconfig.go#L3-L22 +type AuthConfig struct { + // Auth is a base64-encoded string of "{username}:{password}". + Auth string `json:"auth,omitempty"` + // IdentityToken is used to authenticate the user and get an access token + // for the registry. + IdentityToken string `json:"identitytoken,omitempty"` + // RegistryToken is a bearer token to be sent to a registry. + RegistryToken string `json:"registrytoken,omitempty"` + + Username string `json:"username,omitempty"` // legacy field for compatibility + Password string `json:"password,omitempty"` // legacy field for compatibility +} + +// NewAuthConfig creates an authConfig based on cred. +func NewAuthConfig(cred auth.Credential) AuthConfig { + return AuthConfig{ + Auth: encodeAuth(cred.Username, cred.Password), + IdentityToken: cred.RefreshToken, + RegistryToken: cred.AccessToken, + } +} + +// Credential returns an auth.Credential based on ac. 
+func (ac AuthConfig) Credential() (auth.Credential, error) { + cred := auth.Credential{ + Username: ac.Username, + Password: ac.Password, + RefreshToken: ac.IdentityToken, + AccessToken: ac.RegistryToken, + } + if ac.Auth != "" { + var err error + // override username and password + cred.Username, cred.Password, err = decodeAuth(ac.Auth) + if err != nil { + return auth.EmptyCredential, fmt.Errorf("failed to decode auth field: %w: %v", ErrInvalidConfigFormat, err) + } + } + return cred, nil +} + +// Config represents a docker configuration file. +// References: +// - https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties +// - https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L17-L44 +type Config struct { + // path is the path to the config file. + path string + // rwLock is a read-write-lock for the file store. + rwLock sync.RWMutex + // content is the content of the config file. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L17-L44 + content map[string]json.RawMessage + // authsCache is a cache of the auths field of the config. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L19 + authsCache map[string]json.RawMessage + // credentialsStore is the credsStore field of the config. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L28 + credentialsStore string + // credentialHelpers is the credHelpers field of the config. + // Reference: https://github.com/docker/cli/blob/v24.0.0-beta.2/cli/config/configfile/file.go#L29 + credentialHelpers map[string]string +} + +// Load loads Config from the given config path. 
+func Load(configPath string) (*Config, error) { + cfg := &Config{path: configPath} + configFile, err := os.Open(configPath) + if err != nil { + if os.IsNotExist(err) { + // init content and caches if the content file does not exist + cfg.content = make(map[string]json.RawMessage) + cfg.authsCache = make(map[string]json.RawMessage) + return cfg, nil + } + return nil, fmt.Errorf("failed to open config file at %s: %w", configPath, err) + } + defer configFile.Close() + + // decode config content if the config file exists + if err := json.NewDecoder(configFile).Decode(&cfg.content); err != nil { + return nil, fmt.Errorf("failed to decode config file at %s: %w: %v", configPath, ErrInvalidConfigFormat, err) + } + + if credsStoreBytes, ok := cfg.content[configFieldCredentialsStore]; ok { + if err := json.Unmarshal(credsStoreBytes, &cfg.credentialsStore); err != nil { + return nil, fmt.Errorf("failed to unmarshal creds store field: %w: %v", ErrInvalidConfigFormat, err) + } + } + + if credHelpersBytes, ok := cfg.content[configFieldCredentialHelpers]; ok { + if err := json.Unmarshal(credHelpersBytes, &cfg.credentialHelpers); err != nil { + return nil, fmt.Errorf("failed to unmarshal cred helpers field: %w: %v", ErrInvalidConfigFormat, err) + } + } + + if authsBytes, ok := cfg.content[configFieldAuths]; ok { + if err := json.Unmarshal(authsBytes, &cfg.authsCache); err != nil { + return nil, fmt.Errorf("failed to unmarshal auths field: %w: %v", ErrInvalidConfigFormat, err) + } + } + if cfg.authsCache == nil { + cfg.authsCache = make(map[string]json.RawMessage) + } + + return cfg, nil +} + +// GetAuthConfig returns an auth.Credential for serverAddress. +func (cfg *Config) GetCredential(serverAddress string) (auth.Credential, error) { + cfg.rwLock.RLock() + defer cfg.rwLock.RUnlock() + + authCfgBytes, ok := cfg.authsCache[serverAddress] + if !ok { + // NOTE: the auth key for the server address may have been stored with + // a http/https prefix in legacy config files, e.g. 
"registry.example.com" + // can be stored as "https://registry.example.com/". + var matched bool + for addr, auth := range cfg.authsCache { + if toHostname(addr) == serverAddress { + matched = true + authCfgBytes = auth + break + } + } + if !matched { + return auth.EmptyCredential, nil + } + } + var authCfg AuthConfig + if err := json.Unmarshal(authCfgBytes, &authCfg); err != nil { + return auth.EmptyCredential, fmt.Errorf("failed to unmarshal auth field: %w: %v", ErrInvalidConfigFormat, err) + } + return authCfg.Credential() +} + +// PutAuthConfig puts cred for serverAddress. +func (cfg *Config) PutCredential(serverAddress string, cred auth.Credential) error { + cfg.rwLock.Lock() + defer cfg.rwLock.Unlock() + + authCfg := NewAuthConfig(cred) + authCfgBytes, err := json.Marshal(authCfg) + if err != nil { + return fmt.Errorf("failed to marshal auth field: %w", err) + } + cfg.authsCache[serverAddress] = authCfgBytes + return cfg.saveFile() +} + +// DeleteAuthConfig deletes the corresponding credential for serverAddress. +func (cfg *Config) DeleteCredential(serverAddress string) error { + cfg.rwLock.Lock() + defer cfg.rwLock.Unlock() + + if _, ok := cfg.authsCache[serverAddress]; !ok { + // no ops + return nil + } + delete(cfg.authsCache, serverAddress) + return cfg.saveFile() +} + +// GetCredentialHelper returns the credential helpers for serverAddress. +func (cfg *Config) GetCredentialHelper(serverAddress string) string { + return cfg.credentialHelpers[serverAddress] +} + +// CredentialsStore returns the configured credentials store. +func (cfg *Config) CredentialsStore() string { + cfg.rwLock.RLock() + defer cfg.rwLock.RUnlock() + + return cfg.credentialsStore +} + +// Path returns the path to the config file. +func (cfg *Config) Path() string { + return cfg.path +} + +// SetCredentialsStore puts the configured credentials store. 
+func (cfg *Config) SetCredentialsStore(credsStore string) error { + cfg.rwLock.Lock() + defer cfg.rwLock.Unlock() + + cfg.credentialsStore = credsStore + return cfg.saveFile() +} + +// IsAuthConfigured returns whether there is authentication configured in this +// config file or not. +func (cfg *Config) IsAuthConfigured() bool { + return cfg.credentialsStore != "" || + len(cfg.credentialHelpers) > 0 || + len(cfg.authsCache) > 0 +} + +// saveFile saves Config into the file. +func (cfg *Config) saveFile() (returnErr error) { + // marshal content + // credentialHelpers is skipped as it's never set + if cfg.credentialsStore != "" { + credsStoreBytes, err := json.Marshal(cfg.credentialsStore) + if err != nil { + return fmt.Errorf("failed to marshal creds store: %w", err) + } + cfg.content[configFieldCredentialsStore] = credsStoreBytes + } else { + // omit empty + delete(cfg.content, configFieldCredentialsStore) + } + authsBytes, err := json.Marshal(cfg.authsCache) + if err != nil { + return fmt.Errorf("failed to marshal credentials: %w", err) + } + cfg.content[configFieldAuths] = authsBytes + jsonBytes, err := json.MarshalIndent(cfg.content, "", "\t") + if err != nil { + return fmt.Errorf("failed to marshal config: %w", err) + } + + // write the content to a ingest file for atomicity + configDir := filepath.Dir(cfg.path) + if err := os.MkdirAll(configDir, 0700); err != nil { + return fmt.Errorf("failed to make directory %s: %w", configDir, err) + } + ingest, err := ioutil.Ingest(configDir, bytes.NewReader(jsonBytes)) + if err != nil { + return fmt.Errorf("failed to save config file: %w", err) + } + defer func() { + if returnErr != nil { + // clean up the ingest file in case of error + os.Remove(ingest) + } + }() + + // overwrite the config file + if err := os.Rename(ingest, cfg.path); err != nil { + return fmt.Errorf("failed to save config file: %w", err) + } + return nil +} + +// encodeAuth base64-encodes username and password into base64(username:password). 
+func encodeAuth(username, password string) string { + if username == "" && password == "" { + return "" + } + return base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) +} + +// decodeAuth decodes a base64 encoded string and returns username and password. +func decodeAuth(authStr string) (username string, password string, err error) { + if authStr == "" { + return "", "", nil + } + + decoded, err := base64.StdEncoding.DecodeString(authStr) + if err != nil { + return "", "", err + } + decodedStr := string(decoded) + username, password, ok := strings.Cut(decodedStr, ":") + if !ok { + return "", "", fmt.Errorf("auth '%s' does not conform the base64(username:password) format", decodedStr) + } + return username, password, nil +} + +// toHostname normalizes a server address to just its hostname, removing +// the scheme and the path parts. +// It is used to match keys in the auths map, which may be either stored as +// hostname or as hostname including scheme (in legacy docker config files). +// Reference: https://github.com/docker/cli/blob/v24.0.6/cli/config/credentials/file_store.go#L71 +func toHostname(addr string) string { + addr = strings.TrimPrefix(addr, "http://") + addr = strings.TrimPrefix(addr, "https://") + addr, _, _ = strings.Cut(addr, "/") + return addr +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/executer/executer.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/executer/executer.go new file mode 100644 index 000000000..a074c6846 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/executer/executer.go @@ -0,0 +1,80 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package executer is an abstraction for the docker credential helper protocol +// binaries. It is used by nativeStore to interact with installed binaries. +package executer + +import ( + "bytes" + "context" + "errors" + "io" + "os" + "os/exec" + + "oras.land/oras-go/v2/registry/remote/credentials/trace" +) + +// dockerDesktopHelperName is the name of the docker credentials helper +// execuatable. +const dockerDesktopHelperName = "docker-credential-desktop.exe" + +// Executer is an interface that simulates an executable binary. +type Executer interface { + Execute(ctx context.Context, input io.Reader, action string) ([]byte, error) +} + +// executable implements the Executer interface. +type executable struct { + name string +} + +// New returns a new Executer instance. +func New(name string) Executer { + return &executable{ + name: name, + } +} + +// Execute operates on an executable binary and supports context. 
+func (c *executable) Execute(ctx context.Context, input io.Reader, action string) ([]byte, error) { + cmd := exec.CommandContext(ctx, c.name, action) + cmd.Stdin = input + cmd.Stderr = os.Stderr + trace := trace.ContextExecutableTrace(ctx) + if trace != nil && trace.ExecuteStart != nil { + trace.ExecuteStart(c.name, action) + } + output, err := cmd.Output() + if trace != nil && trace.ExecuteDone != nil { + trace.ExecuteDone(c.name, action, err) + } + if err != nil { + switch execErr := err.(type) { + case *exec.ExitError: + if errMessage := string(bytes.TrimSpace(output)); errMessage != "" { + return nil, errors.New(errMessage) + } + case *exec.Error: + // check if the error is caused by Docker Desktop not running + if execErr.Err == exec.ErrNotFound && c.name == dockerDesktopHelperName { + return nil, errors.New("credentials store is configured to `desktop.exe` but Docker Desktop seems not running") + } + } + return nil, err + } + return output, nil +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil/ioutil.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil/ioutil.go new file mode 100644 index 000000000..b2e3179dd --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/internal/ioutil/ioutil.go @@ -0,0 +1,49 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package ioutil + +import ( + "fmt" + "io" + "os" +) + +// Ingest writes content into a temporary ingest file with the file name format +// "oras_credstore_temp_{randomString}". +func Ingest(dir string, content io.Reader) (path string, ingestErr error) { + tempFile, err := os.CreateTemp(dir, "oras_credstore_temp_*") + if err != nil { + return "", fmt.Errorf("failed to create ingest file: %w", err) + } + path = tempFile.Name() + defer func() { + if err := tempFile.Close(); err != nil && ingestErr == nil { + ingestErr = fmt.Errorf("failed to close ingest file: %w", err) + } + // remove the temp file in case of error. + if ingestErr != nil { + os.Remove(path) + } + }() + + if err := tempFile.Chmod(0600); err != nil { + return "", fmt.Errorf("failed to ensure permission: %w", err) + } + if _, err := io.Copy(tempFile, content); err != nil { + return "", fmt.Errorf("failed to ingest: %w", err) + } + return +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/memory_store.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/memory_store.go new file mode 100644 index 000000000..6eb7749b4 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/memory_store.go @@ -0,0 +1,54 @@ +/* + Copyright The ORAS Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package credentials + +import ( + "context" + "sync" + + "oras.land/oras-go/v2/registry/remote/auth" +) + +// memoryStore is a store that keeps credentials in memory. 
+type memoryStore struct { + store sync.Map +} + +// NewMemoryStore creates a new in-memory credentials store. +func NewMemoryStore() Store { + return &memoryStore{} +} + +// Get retrieves credentials from the store for the given server address. +func (ms *memoryStore) Get(_ context.Context, serverAddress string) (auth.Credential, error) { + cred, found := ms.store.Load(serverAddress) + if !found { + return auth.EmptyCredential, nil + } + return cred.(auth.Credential), nil +} + +// Put saves credentials into the store for the given server address. +func (ms *memoryStore) Put(_ context.Context, serverAddress string, cred auth.Credential) error { + ms.store.Store(serverAddress, cred) + return nil +} + +// Delete removes credentials from the store for the given server address. +func (ms *memoryStore) Delete(_ context.Context, serverAddress string) error { + ms.store.Delete(serverAddress) + return nil +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store.go new file mode 100644 index 000000000..9f4c7f742 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store.go @@ -0,0 +1,139 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package credentials + +import ( + "bytes" + "context" + "encoding/json" + "os/exec" + "strings" + + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials/internal/executer" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + emptyUsername = "" + errCredentialsNotFoundMessage = "credentials not found in native keychain" +) + +// dockerCredentials mimics how docker credential helper binaries store +// credential information. +// Reference: +// - https://docs.docker.com/engine/reference/commandline/login/#credential-helper-protocol +type dockerCredentials struct { + ServerURL string `json:"ServerURL"` + Username string `json:"Username"` + Secret string `json:"Secret"` +} + +// nativeStore implements a credentials store using native keychain to keep +// credentials secure. +type nativeStore struct { + exec executer.Executer +} + +// NewNativeStore creates a new native store that uses a remote helper program to +// manage credentials. +// +// The argument of NewNativeStore can be the native keychains +// ("wincred" for Windows, "pass" for linux and "osxkeychain" for macOS), +// or any program that follows the docker-credentials-helper protocol. +// +// Reference: +// - https://docs.docker.com/engine/reference/commandline/login#credentials-store +func NewNativeStore(helperSuffix string) Store { + return &nativeStore{ + exec: executer.New(remoteCredentialsPrefix + helperSuffix), + } +} + +// NewDefaultNativeStore returns a native store based on the platform-default +// docker credentials helper and a bool indicating if the native store is +// available. 
+// - Windows: "wincred" +// - Linux: "pass" or "secretservice" +// - macOS: "osxkeychain" +// +// Reference: +// - https://docs.docker.com/engine/reference/commandline/login/#credentials-store +func NewDefaultNativeStore() (Store, bool) { + if helper := getDefaultHelperSuffix(); helper != "" { + return NewNativeStore(helper), true + } + return nil, false +} + +// Get retrieves credentials from the store for the given server. +func (ns *nativeStore) Get(ctx context.Context, serverAddress string) (auth.Credential, error) { + var cred auth.Credential + out, err := ns.exec.Execute(ctx, strings.NewReader(serverAddress), "get") + if err != nil { + if err.Error() == errCredentialsNotFoundMessage { + // do not return an error if the credentials are not in the keychain. + return auth.EmptyCredential, nil + } + return auth.EmptyCredential, err + } + var dockerCred dockerCredentials + if err := json.Unmarshal(out, &dockerCred); err != nil { + return auth.EmptyCredential, err + } + // bearer auth is used if the username is "" + if dockerCred.Username == emptyUsername { + cred.RefreshToken = dockerCred.Secret + } else { + cred.Username = dockerCred.Username + cred.Password = dockerCred.Secret + } + return cred, nil +} + +// Put saves credentials into the store. +func (ns *nativeStore) Put(ctx context.Context, serverAddress string, cred auth.Credential) error { + dockerCred := &dockerCredentials{ + ServerURL: serverAddress, + Username: cred.Username, + Secret: cred.Password, + } + if cred.RefreshToken != "" { + dockerCred.Username = emptyUsername + dockerCred.Secret = cred.RefreshToken + } + credJSON, err := json.Marshal(dockerCred) + if err != nil { + return err + } + _, err = ns.exec.Execute(ctx, bytes.NewReader(credJSON), "store") + return err +} + +// Delete removes credentials from the store for the given server. 
+func (ns *nativeStore) Delete(ctx context.Context, serverAddress string) error { + _, err := ns.exec.Execute(ctx, strings.NewReader(serverAddress), "erase") + return err +} + +// getDefaultHelperSuffix returns the default credential helper suffix. +func getDefaultHelperSuffix() string { + platformDefault := getPlatformDefaultHelperSuffix() + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { + return platformDefault + } + return "" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_darwin.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_darwin.go new file mode 100644 index 000000000..1a9aca6fc --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_darwin.go @@ -0,0 +1,23 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// getPlatformDefaultHelperSuffix returns the platform default credential +// helper suffix. 
+// Reference: https://docs.docker.com/engine/reference/commandline/login/#default-behavior +func getPlatformDefaultHelperSuffix() string { + return "osxkeychain" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_generic.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_generic.go new file mode 100644 index 000000000..5c7d4a3b2 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_generic.go @@ -0,0 +1,25 @@ +//go:build !windows && !darwin && !linux + +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// getPlatformDefaultHelperSuffix returns the platform default credential +// helper suffix. +// Reference: https://docs.docker.com/engine/reference/commandline/login/#default-behavior +func getPlatformDefaultHelperSuffix() string { + return "" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_linux.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_linux.go new file mode 100644 index 000000000..f182923b7 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_linux.go @@ -0,0 +1,29 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import "os/exec" + +// getPlatformDefaultHelperSuffix returns the platform default credential +// helper suffix. +// Reference: https://docs.docker.com/engine/reference/commandline/login/#default-behavior +func getPlatformDefaultHelperSuffix() string { + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + + return "secretservice" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_windows.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_windows.go new file mode 100644 index 000000000..e334cc79b --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/native_store_windows.go @@ -0,0 +1,23 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +// getPlatformDefaultHelperSuffix returns the platform default credential +// helper suffix. 
+// Reference: https://docs.docker.com/engine/reference/commandline/login/#default-behavior +func getPlatformDefaultHelperSuffix() string { + return "wincred" +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/registry.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/registry.go new file mode 100644 index 000000000..39735b77c --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/registry.go @@ -0,0 +1,102 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package credentials + +import ( + "context" + "errors" + "fmt" + + "oras.land/oras-go/v2/registry/remote" + "oras.land/oras-go/v2/registry/remote/auth" +) + +// ErrClientTypeUnsupported is thrown by Login() when the registry's client type +// is not supported. +var ErrClientTypeUnsupported = errors.New("client type not supported") + +// Login provides the login functionality with the given credentials. The target +// registry's client should be nil or of type *auth.Client. Login uses +// a client local to the function and will not modify the original client of +// the registry. 
+func Login(ctx context.Context, store Store, reg *remote.Registry, cred auth.Credential) error { + // create a clone of the original registry for login purpose + regClone := *reg + // we use the original client if applicable, otherwise use a default client + var authClient auth.Client + if reg.Client == nil { + authClient = *auth.DefaultClient + authClient.Cache = nil // no cache + } else if client, ok := reg.Client.(*auth.Client); ok { + authClient = *client + } else { + return ErrClientTypeUnsupported + } + regClone.Client = &authClient + // update credentials with the client + authClient.Credential = auth.StaticCredential(reg.Reference.Registry, cred) + // validate and store the credential + if err := regClone.Ping(ctx); err != nil { + return fmt.Errorf("failed to validate the credentials for %s: %w", regClone.Reference.Registry, err) + } + hostname := ServerAddressFromRegistry(regClone.Reference.Registry) + if err := store.Put(ctx, hostname, cred); err != nil { + return fmt.Errorf("failed to store the credentials for %s: %w", hostname, err) + } + return nil +} + +// Logout provides the logout functionality given the registry name. +func Logout(ctx context.Context, store Store, registryName string) error { + registryName = ServerAddressFromRegistry(registryName) + if err := store.Delete(ctx, registryName); err != nil { + return fmt.Errorf("failed to delete the credential for %s: %w", registryName, err) + } + return nil +} + +// Credential returns a Credential() function that can be used by auth.Client. +func Credential(store Store) auth.CredentialFunc { + return func(ctx context.Context, hostport string) (auth.Credential, error) { + hostport = ServerAddressFromHostname(hostport) + if hostport == "" { + return auth.EmptyCredential, nil + } + return store.Get(ctx, hostport) + } +} + +// ServerAddressFromRegistry maps a registry to a server address, which is used as +// a key for credentials store. 
The Docker CLI expects that the credentials of +// the registry 'docker.io' will be added under the key "https://index.docker.io/v1/". +// See: https://github.com/moby/moby/blob/v24.0.2/registry/config.go#L25-L48 +func ServerAddressFromRegistry(registry string) string { + if registry == "docker.io" { + return "https://index.docker.io/v1/" + } + return registry +} + +// ServerAddressFromHostname maps a hostname to a server address, which is used as +// a key for credentials store. It is expected that the traffic targetting the +// host "registry-1.docker.io" will be redirected to "https://index.docker.io/v1/". +// See: https://github.com/moby/moby/blob/v24.0.2/registry/config.go#L25-L48 +func ServerAddressFromHostname(hostname string) string { + if hostname == "registry-1.docker.io" { + return "https://index.docker.io/v1/" + } + return hostname +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/store.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/store.go new file mode 100644 index 000000000..e26a98ae7 --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/store.go @@ -0,0 +1,262 @@ +/* +Copyright The ORAS Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package credentials supports reading, saving, and removing credentials from +// Docker configuration files and external credential stores that follow +// the Docker credential helper protocol. 
+// +// Reference: https://docs.docker.com/engine/reference/commandline/login/#credential-stores +package credentials + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "oras.land/oras-go/v2/internal/syncutil" + "oras.land/oras-go/v2/registry/remote/auth" + "oras.land/oras-go/v2/registry/remote/credentials/internal/config" +) + +const ( + dockerConfigDirEnv = "DOCKER_CONFIG" + dockerConfigFileDir = ".docker" + dockerConfigFileName = "config.json" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Get retrieves credentials from the store for the given server address. + Get(ctx context.Context, serverAddress string) (auth.Credential, error) + // Put saves credentials into the store for the given server address. + Put(ctx context.Context, serverAddress string, cred auth.Credential) error + // Delete removes credentials from the store for the given server address. + Delete(ctx context.Context, serverAddress string) error +} + +// DynamicStore dynamically determines which store to use based on the settings +// in the config file. +type DynamicStore struct { + config *config.Config + options StoreOptions + detectedCredsStore string + setCredsStoreOnce syncutil.OnceOrRetry +} + +// StoreOptions provides options for NewStore. +type StoreOptions struct { + // AllowPlaintextPut allows saving credentials in plaintext in the config + // file. + // - If AllowPlaintextPut is set to false (default value), Put() will + // return an error when native store is not available. + // - If AllowPlaintextPut is set to true, Put() will save credentials in + // plaintext in the config file when native store is not available. + AllowPlaintextPut bool + + // DetectDefaultNativeStore enables detecting the platform-default native + // credentials store when the config file has no authentication information. 
+ // + // If DetectDefaultNativeStore is set to true, the store will detect and set + // the default native credentials store in the "credsStore" field of the + // config file. + // - Windows: "wincred" + // - Linux: "pass" or "secretservice" + // - macOS: "osxkeychain" + // + // References: + // - https://docs.docker.com/engine/reference/commandline/login/#credentials-store + // - https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties + DetectDefaultNativeStore bool +} + +// NewStore returns a Store based on the given configuration file. +// +// For Get(), Put() and Delete(), the returned Store will dynamically determine +// which underlying credentials store to use for the given server address. +// The underlying credentials store is determined in the following order: +// 1. Native server-specific credential helper +// 2. Native credentials store +// 3. The plain-text config file itself +// +// References: +// - https://docs.docker.com/engine/reference/commandline/login/#credentials-store +// - https://docs.docker.com/engine/reference/commandline/cli/#docker-cli-configuration-file-configjson-properties +func NewStore(configPath string, opts StoreOptions) (*DynamicStore, error) { + cfg, err := config.Load(configPath) + if err != nil { + return nil, err + } + ds := &DynamicStore{ + config: cfg, + options: opts, + } + if opts.DetectDefaultNativeStore && !cfg.IsAuthConfigured() { + // no authentication configured, detect the default credentials store + ds.detectedCredsStore = getDefaultHelperSuffix() + } + return ds, nil +} + +// NewStoreFromDocker returns a Store based on the default docker config file. +// - If the $DOCKER_CONFIG environment variable is set, +// $DOCKER_CONFIG/config.json will be used. +// - Otherwise, the default location $HOME/.docker/config.json will be used. +// +// NewStoreFromDocker internally calls [NewStore]. 
+// +// References: +// - https://docs.docker.com/engine/reference/commandline/cli/#configuration-files +// - https://docs.docker.com/engine/reference/commandline/cli/#change-the-docker-directory +func NewStoreFromDocker(opt StoreOptions) (*DynamicStore, error) { + configPath, err := getDockerConfigPath() + if err != nil { + return nil, err + } + return NewStore(configPath, opt) +} + +// Get retrieves credentials from the store for the given server address. +func (ds *DynamicStore) Get(ctx context.Context, serverAddress string) (auth.Credential, error) { + return ds.getStore(serverAddress).Get(ctx, serverAddress) +} + +// Put saves credentials into the store for the given server address. +// Put returns ErrPlaintextPutDisabled if native store is not available and +// [StoreOptions].AllowPlaintextPut is set to false. +func (ds *DynamicStore) Put(ctx context.Context, serverAddress string, cred auth.Credential) error { + if err := ds.getStore(serverAddress).Put(ctx, serverAddress, cred); err != nil { + return err + } + // save the detected creds store back to the config file on first put + return ds.setCredsStoreOnce.Do(func() error { + if ds.detectedCredsStore != "" { + if err := ds.config.SetCredentialsStore(ds.detectedCredsStore); err != nil { + return fmt.Errorf("failed to set credsStore: %w", err) + } + } + return nil + }) +} + +// Delete removes credentials from the store for the given server address. +func (ds *DynamicStore) Delete(ctx context.Context, serverAddress string) error { + return ds.getStore(serverAddress).Delete(ctx, serverAddress) +} + +// IsAuthConfigured returns whether there is authentication configured in the +// config file or not. 
+// +// IsAuthConfigured returns true when: +// - The "credsStore" field is not empty +// - Or the "credHelpers" field is not empty +// - Or there is any entry in the "auths" field +func (ds *DynamicStore) IsAuthConfigured() bool { + return ds.config.IsAuthConfigured() +} + +// ConfigPath returns the path to the config file. +func (ds *DynamicStore) ConfigPath() string { + return ds.config.Path() +} + +// getHelperSuffix returns the credential helper suffix for the given server +// address. +func (ds *DynamicStore) getHelperSuffix(serverAddress string) string { + // 1. Look for a server-specific credential helper first + if helper := ds.config.GetCredentialHelper(serverAddress); helper != "" { + return helper + } + // 2. Then look for the configured native store + if credsStore := ds.config.CredentialsStore(); credsStore != "" { + return credsStore + } + // 3. Use the detected default store + return ds.detectedCredsStore +} + +// getStore returns a store for the given server address. +func (ds *DynamicStore) getStore(serverAddress string) Store { + if helper := ds.getHelperSuffix(serverAddress); helper != "" { + return NewNativeStore(helper) + } + + fs := newFileStore(ds.config) + fs.DisablePut = !ds.options.AllowPlaintextPut + return fs +} + +// getDockerConfigPath returns the path to the default docker config file. +func getDockerConfigPath() (string, error) { + // first try the environment variable + configDir := os.Getenv(dockerConfigDirEnv) + if configDir == "" { + // then try home directory + homeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("failed to get user home directory: %w", err) + } + configDir = filepath.Join(homeDir, dockerConfigFileDir) + } + return filepath.Join(configDir, dockerConfigFileName), nil +} + +// storeWithFallbacks is a store that has multiple fallback stores. +type storeWithFallbacks struct { + stores []Store +} + +// NewStoreWithFallbacks returns a new store based on the given stores. 
+// - Get() searches the primary and the fallback stores +// for the credentials and returns when it finds the +// credentials in any of the stores. +// - Put() saves the credentials into the primary store. +// - Delete() deletes the credentials from the primary store. +func NewStoreWithFallbacks(primary Store, fallbacks ...Store) Store { + if len(fallbacks) == 0 { + return primary + } + return &storeWithFallbacks{ + stores: append([]Store{primary}, fallbacks...), + } +} + +// Get retrieves credentials from the StoreWithFallbacks for the given server. +// It searches the primary and the fallback stores for the credentials of serverAddress +// and returns when it finds the credentials in any of the stores. +func (sf *storeWithFallbacks) Get(ctx context.Context, serverAddress string) (auth.Credential, error) { + for _, s := range sf.stores { + cred, err := s.Get(ctx, serverAddress) + if err != nil { + return auth.EmptyCredential, err + } + if cred != auth.EmptyCredential { + return cred, nil + } + } + return auth.EmptyCredential, nil +} + +// Put saves credentials into the StoreWithFallbacks. It puts +// the credentials into the primary store. +func (sf *storeWithFallbacks) Put(ctx context.Context, serverAddress string, cred auth.Credential) error { + return sf.stores[0].Put(ctx, serverAddress, cred) +} + +// Delete removes credentials from the StoreWithFallbacks for the given server. +// It deletes the credentials from the primary store. +func (sf *storeWithFallbacks) Delete(ctx context.Context, serverAddress string) error { + return sf.stores[0].Delete(ctx, serverAddress) +} diff --git a/vendor/oras.land/oras-go/v2/registry/remote/credentials/trace/trace.go b/vendor/oras.land/oras-go/v2/registry/remote/credentials/trace/trace.go new file mode 100644 index 000000000..b7cd8683c --- /dev/null +++ b/vendor/oras.land/oras-go/v2/registry/remote/credentials/trace/trace.go @@ -0,0 +1,94 @@ +/* +Copyright The ORAS Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import "context" + +// executableTraceContextKey is a value key used to retrieve the ExecutableTrace +// from Context. +type executableTraceContextKey struct{} + +// ExecutableTrace is a set of hooks used to trace the execution of binary +// executables. Any particular hook may be nil. +type ExecutableTrace struct { + // ExecuteStart is called before the execution of the executable. The + // executableName parameter is the name of the credential helper executable + // used with NativeStore. The action parameter is one of "store", "get" and + // "erase". + // + // Reference: + // - https://docs.docker.com/engine/reference/commandline/login#credentials-store + ExecuteStart func(executableName string, action string) + + // ExecuteDone is called after the execution of an executable completes. + // The executableName parameter is the name of the credential helper + // executable used with NativeStore. The action parameter is one of "store", + // "get" and "erase". The err parameter is the error (if any) returned from + // the execution. + // + // Reference: + // - https://docs.docker.com/engine/reference/commandline/login#credentials-store + ExecuteDone func(executableName string, action string, err error) +} + +// ContextExecutableTrace returns the ExecutableTrace associated with the +// context. If none, it returns nil. 
+func ContextExecutableTrace(ctx context.Context) *ExecutableTrace { + trace, _ := ctx.Value(executableTraceContextKey{}).(*ExecutableTrace) + return trace +} + +// WithExecutableTrace takes a Context and an ExecutableTrace, and returns a +// Context with the ExecutableTrace added as a Value. If the Context has a +// previously added trace, the hooks defined in the new trace will be added +// in addition to the previous ones. The recent hooks will be called first. +func WithExecutableTrace(ctx context.Context, trace *ExecutableTrace) context.Context { + if trace == nil { + return ctx + } + if oldTrace := ContextExecutableTrace(ctx); oldTrace != nil { + trace.compose(oldTrace) + } + return context.WithValue(ctx, executableTraceContextKey{}, trace) +} + +// compose takes an oldTrace and modifies the existing trace to include +// the hooks defined in the oldTrace. The hooks in the existing trace will +// be called first. +func (trace *ExecutableTrace) compose(oldTrace *ExecutableTrace) { + if oldStart := oldTrace.ExecuteStart; oldStart != nil { + start := trace.ExecuteStart + if start != nil { + trace.ExecuteStart = func(executableName, action string) { + start(executableName, action) + oldStart(executableName, action) + } + } else { + trace.ExecuteStart = oldStart + } + } + if oldDone := oldTrace.ExecuteDone; oldDone != nil { + done := trace.ExecuteDone + if done != nil { + trace.ExecuteDone = func(executableName, action string, err error) { + done(executableName, action, err) + oldDone(executableName, action, err) + } + } else { + trace.ExecuteDone = oldDone + } + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go index 6d182768d..77ae25116 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/fieldpath/set.go @@ -17,6 +17,8 @@ limitations under the License. 
package fieldpath import ( + "fmt" + "sigs.k8s.io/structured-merge-diff/v4/value" "sort" "strings" @@ -136,6 +138,198 @@ func (s *Set) EnsureNamedFieldsAreMembers(sc *schema.Schema, tr schema.TypeRef) } } +// MakePrefixMatcherOrDie is the same as PrefixMatcher except it panics if parts can't be +// turned into a SetMatcher. +func MakePrefixMatcherOrDie(parts ...interface{}) *SetMatcher { + result, err := PrefixMatcher(parts...) + if err != nil { + panic(err) + } + return result +} + +// PrefixMatcher creates a SetMatcher that matches all field paths prefixed by the given list of matcher path parts. +// The matcher parts may any of: +// +// - PathElementMatcher - for wildcards, `MatchAnyPathElement()` can be used as well. +// - PathElement - for any path element +// - value.FieldList - for listMap keys +// - value.Value - for scalar list elements +// - string - For field names +// - int - for array indices +func PrefixMatcher(parts ...interface{}) (*SetMatcher, error) { + current := MatchAnySet() // match all field path suffixes + for i := len(parts) - 1; i >= 0; i-- { + part := parts[i] + var pattern PathElementMatcher + switch t := part.(type) { + case PathElementMatcher: + // any path matcher, including wildcard + pattern = t + case PathElement: + // any path element + pattern = PathElementMatcher{PathElement: t} + case *value.FieldList: + // a listMap key + if len(*t) == 0 { + return nil, fmt.Errorf("associative list key type path elements must have at least one key (got zero)") + } + pattern = PathElementMatcher{PathElement: PathElement{Key: t}} + case value.Value: + // a scalar or set-type list element + pattern = PathElementMatcher{PathElement: PathElement{Value: &t}} + case string: + // a plain field name + pattern = PathElementMatcher{PathElement: PathElement{FieldName: &t}} + case int: + // a plain list index + pattern = PathElementMatcher{PathElement: PathElement{Index: &t}} + default: + return nil, fmt.Errorf("unexpected type %T", t) + } + current = 
&SetMatcher{ + members: []*SetMemberMatcher{{ + Path: pattern, + Child: current, + }}, + } + } + return current, nil +} + +// MatchAnyPathElement returns a PathElementMatcher that matches any path element. +func MatchAnyPathElement() PathElementMatcher { + return PathElementMatcher{Wildcard: true} +} + +// MatchAnySet returns a SetMatcher that matches any set. +func MatchAnySet() *SetMatcher { + return &SetMatcher{wildcard: true} +} + +// NewSetMatcher returns a new SetMatcher. +// Wildcard members take precedent over non-wildcard members; +// all non-wildcard members are ignored if there is a wildcard members. +func NewSetMatcher(wildcard bool, members ...*SetMemberMatcher) *SetMatcher { + sort.Sort(sortedMemberMatcher(members)) + return &SetMatcher{wildcard: wildcard, members: members} +} + +// SetMatcher defines a matcher that matches fields in a Set. +// SetMatcher is structured much like a Set but with wildcard support. +type SetMatcher struct { + // wildcard indicates that all members and children are included in the match. + // If set, the members field is ignored. + wildcard bool + // members provides patterns to match the members of a Set. + // Wildcard members are sorted before non-wildcards and take precedent over + // non-wildcard members. + members sortedMemberMatcher +} + +type sortedMemberMatcher []*SetMemberMatcher + +func (s sortedMemberMatcher) Len() int { return len(s) } +func (s sortedMemberMatcher) Less(i, j int) bool { return s[i].Path.Less(s[j].Path) } +func (s sortedMemberMatcher) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s sortedMemberMatcher) Find(p PathElementMatcher) (location int, ok bool) { + return sort.Find(len(s), func(i int) int { + return s[i].Path.Compare(p) + }) +} + +// Merge merges s and s2 and returns a SetMatcher that matches all field paths matched by either s or s2. 
+// During the merge, members of s and s2 with the same PathElementMatcher merged into a single member +// with the children of each merged by calling this function recursively. +func (s *SetMatcher) Merge(s2 *SetMatcher) *SetMatcher { + if s.wildcard || s2.wildcard { + return NewSetMatcher(true) + } + merged := make(sortedMemberMatcher, len(s.members), len(s.members)+len(s2.members)) + copy(merged, s.members) + for _, m := range s2.members { + if i, ok := s.members.Find(m.Path); ok { + // since merged is a shallow copy, do not modify elements in place + merged[i] = &SetMemberMatcher{ + Path: merged[i].Path, + Child: merged[i].Child.Merge(m.Child), + } + } else { + merged = append(merged, m) + } + } + return NewSetMatcher(false, merged...) // sort happens here +} + +// SetMemberMatcher defines a matcher that matches the members of a Set. +// SetMemberMatcher is structured much like the elements of a SetNodeMap, but +// with wildcard support. +type SetMemberMatcher struct { + // Path provides a matcher to match members of a Set. + // If Path is a wildcard, all members of a Set are included in the match. + // Otherwise, if any Path is Equal to a member of a Set, that member is + // included in the match and the children of that member are matched + // against the Child matcher. + Path PathElementMatcher + + // Child provides a matcher to use for the children of matched members of a Set. + Child *SetMatcher +} + +// PathElementMatcher defined a path matcher for a PathElement. +type PathElementMatcher struct { + // Wildcard indicates that all PathElements are matched by this matcher. + // If set, PathElement is ignored. + Wildcard bool + + // PathElement indicates that a PathElement is matched if it is Equal + // to this PathElement. 
+ PathElement +} + +func (p PathElementMatcher) Equals(p2 PathElementMatcher) bool { + return p.Wildcard != p2.Wildcard && p.PathElement.Equals(p2.PathElement) +} + +func (p PathElementMatcher) Less(p2 PathElementMatcher) bool { + if p.Wildcard && !p2.Wildcard { + return true + } else if p2.Wildcard { + return false + } + return p.PathElement.Less(p2.PathElement) +} + +func (p PathElementMatcher) Compare(p2 PathElementMatcher) int { + if p.Wildcard && !p2.Wildcard { + return -1 + } else if p2.Wildcard { + return 1 + } + return p.PathElement.Compare(p2.PathElement) +} + +// FilterIncludeMatches returns a Set with only the field paths that match. +func (s *Set) FilterIncludeMatches(pattern *SetMatcher) *Set { + if pattern.wildcard { + return s + } + + members := PathElementSet{} + for _, m := range s.Members.members { + for _, pm := range pattern.members { + if pm.Path.Wildcard || pm.Path.PathElement.Equals(m) { + members.Insert(m) + break + } + } + } + return &Set{ + Members: members, + Children: *s.Children.FilterIncludeMatches(pattern), + } +} + // Size returns the number of members of the set. func (s *Set) Size() int { return s.Members.Size() + s.Children.Size() @@ -476,6 +670,33 @@ func (s *SetNodeMap) EnsureNamedFieldsAreMembers(sc *schema.Schema, tr schema.Ty } } +// FilterIncludeMatches returns a SetNodeMap with only the field paths that match the matcher. +func (s *SetNodeMap) FilterIncludeMatches(pattern *SetMatcher) *SetNodeMap { + if pattern.wildcard { + return s + } + + var out sortedSetNode + for _, member := range s.members { + for _, c := range pattern.members { + if c.Path.Wildcard || c.Path.PathElement.Equals(member.pathElement) { + childSet := member.set.FilterIncludeMatches(c.Child) + if childSet.Size() > 0 { + out = append(out, setNode{ + pathElement: member.pathElement, + set: childSet, + }) + } + break + } + } + } + + return &SetNodeMap{ + members: out, + } +} + // Iterate calls f for each PathElement in the set. 
func (s *SetNodeMap) Iterate(f func(PathElement)) { for _, n := range s.members { @@ -503,3 +724,59 @@ func (s *SetNodeMap) Leaves() *SetNodeMap { } return out } + +// Filter defines an interface for excluding field paths from a set. +// NewExcludeSetFilter can be used to create a filter that removes +// specific field paths and all of their children. +// NewIncludeMatcherFilter can be used to create a filter that removes all fields except +// the fields that match a field path matcher. PrefixMatcher and MakePrefixMatcherOrDie +// can be used to define field path patterns. +type Filter interface { + // Filter returns a filtered copy of the set. + Filter(*Set) *Set +} + +// NewExcludeSetFilter returns a filter that removes field paths in the exclude set. +func NewExcludeSetFilter(exclude *Set) Filter { + return excludeFilter{exclude} +} + +// NewExcludeFilterSetMap converts a map of APIVersion to exclude set to a map of APIVersion to exclude filters. +func NewExcludeFilterSetMap(resetFields map[APIVersion]*Set) map[APIVersion]Filter { + result := make(map[APIVersion]Filter) + for k, v := range resetFields { + result[k] = excludeFilter{v} + } + return result +} + +type excludeFilter struct { + excludeSet *Set +} + +func (t excludeFilter) Filter(set *Set) *Set { + return set.RecursiveDifference(t.excludeSet) +} + +// NewIncludeMatcherFilter returns a filter that only includes field paths that match. +// If no matchers are provided, the filter includes all field paths. +// PrefixMatcher and MakePrefixMatcherOrDie can help create basic matcher. 
+func NewIncludeMatcherFilter(matchers ...*SetMatcher) Filter { + if len(matchers) == 0 { + return includeMatcherFilter{&SetMatcher{wildcard: true}} + } + matcher := matchers[0] + for i := 1; i < len(matchers); i++ { + matcher = matcher.Merge(matchers[i]) + } + + return includeMatcherFilter{matcher} +} + +type includeMatcherFilter struct { + matcher *SetMatcher +} + +func (pf includeMatcherFilter) Filter(set *Set) *Set { + return set.FilterIncludeMatches(pf.matcher) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go index d5a977d60..455818ff8 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/merge/update.go @@ -15,7 +15,6 @@ package merge import ( "fmt" - "sigs.k8s.io/structured-merge-diff/v4/fieldpath" "sigs.k8s.io/structured-merge-diff/v4/typed" "sigs.k8s.io/structured-merge-diff/v4/value" @@ -31,7 +30,10 @@ type Converter interface { // UpdateBuilder allows you to create a new Updater by exposing all of // the options and setting them once. type UpdaterBuilder struct { - Converter Converter + Converter Converter + IgnoreFilter map[fieldpath.APIVersion]fieldpath.Filter + + // IgnoredFields provides a set of fields to ignore for each IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set // Stop comparing the new object with old object after applying. @@ -46,6 +48,7 @@ type UpdaterBuilder struct { func (u *UpdaterBuilder) BuildUpdater() *Updater { return &Updater{ Converter: u.Converter, + IgnoreFilter: u.IgnoreFilter, IgnoredFields: u.IgnoredFields, returnInputOnNoop: u.ReturnInputOnNoop, } @@ -60,6 +63,9 @@ type Updater struct { // Deprecated: This will eventually become private. IgnoredFields map[fieldpath.APIVersion]*fieldpath.Set + // Deprecated: This will eventually become private. 
+ IgnoreFilter map[fieldpath.APIVersion]fieldpath.Filter + returnInputOnNoop bool } @@ -71,8 +77,19 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa return nil, nil, fmt.Errorf("failed to compare objects: %v", err) } - versions := map[fieldpath.APIVersion]*typed.Comparison{ - version: compare.ExcludeFields(s.IgnoredFields[version]), + var versions map[fieldpath.APIVersion]*typed.Comparison + + if s.IgnoredFields != nil && s.IgnoreFilter != nil { + return nil, nil, fmt.Errorf("IgnoreFilter and IgnoreFilter may not both be set") + } + if s.IgnoredFields != nil { + versions = map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.ExcludeFields(s.IgnoredFields[version]), + } + } else { + versions = map[fieldpath.APIVersion]*typed.Comparison{ + version: compare.FilterFields(s.IgnoreFilter[version]), + } } for manager, managerSet := range managers { @@ -102,7 +119,12 @@ func (s *Updater) update(oldObject, newObject *typed.TypedValue, version fieldpa if err != nil { return nil, nil, fmt.Errorf("failed to compare objects: %v", err) } - versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + + if s.IgnoredFields != nil { + versions[managerSet.APIVersion()] = compare.ExcludeFields(s.IgnoredFields[managerSet.APIVersion()]) + } else { + versions[managerSet.APIVersion()] = compare.FilterFields(s.IgnoreFilter[managerSet.APIVersion()]) + } } conflictSet := managerSet.Set().Intersection(compare.Modified.Union(compare.Added)) @@ -154,13 +176,23 @@ func (s *Updater) Update(liveObject, newObject *typed.TypedValue, version fieldp if _, ok := managers[manager]; !ok { managers[manager] = fieldpath.NewVersionedSet(fieldpath.NewSet(), version, false) } + set := managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added) - ignored := s.IgnoredFields[version] - if ignored == nil { - ignored = fieldpath.NewSet() + if s.IgnoredFields != nil && s.IgnoreFilter != nil { + 
return nil, nil, fmt.Errorf("IgnoreFilter and IgnoreFilter may not both be set") + } + var ignoreFilter fieldpath.Filter + if s.IgnoredFields != nil { + ignoreFilter = fieldpath.NewExcludeSetFilter(s.IgnoredFields[version]) + } else { + ignoreFilter = s.IgnoreFilter[version] + } + if ignoreFilter != nil { + set = ignoreFilter.Filter(set) } + managers[manager] = fieldpath.NewVersionedSet( - managers[manager].Set().Difference(compare.Removed).Union(compare.Modified).Union(compare.Added).RecursiveDifference(ignored), + set, version, false, ) @@ -189,13 +221,17 @@ func (s *Updater) Apply(liveObject, configObject *typed.TypedValue, version fiel return nil, fieldpath.ManagedFields{}, fmt.Errorf("failed to get field set: %v", err) } - ignored := s.IgnoredFields[version] - if ignored != nil { - set = set.RecursiveDifference(ignored) - // TODO: is this correct. If we don't remove from lastSet pruning might remove the fields? - if lastSet != nil { - lastSet.Set().RecursiveDifference(ignored) - } + if s.IgnoredFields != nil && s.IgnoreFilter != nil { + return nil, nil, fmt.Errorf("IgnoreFilter and IgnoreFilter may not both be set") + } + var ignoreFilter fieldpath.Filter + if s.IgnoredFields != nil { + ignoreFilter = fieldpath.NewExcludeSetFilter(s.IgnoredFields[version]) + } else { + ignoreFilter = s.IgnoreFilter[version] + } + if ignoreFilter != nil { + set = ignoreFilter.Filter(set) } managers[manager] = fieldpath.NewVersionedSet(set, version, true) newObject, err = s.prune(newObject, managers, manager, lastSet) diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go index ed483cbbc..5fffa5e2c 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/compare.go @@ -72,6 +72,16 @@ func (c *Comparison) ExcludeFields(fields *fieldpath.Set) *Comparison { return c } +func (c *Comparison) FilterFields(filter fieldpath.Filter) 
*Comparison { + if filter == nil { + return c + } + c.Removed = filter.Filter(c.Removed) + c.Modified = filter.Filter(c.Modified) + c.Added = filter.Filter(c.Added) + return c +} + type compareWalker struct { lhs value.Value rhs value.Value diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go index 4258ee5ba..0e9f7cc7e 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/parser.go @@ -19,9 +19,9 @@ package typed import ( "fmt" - yaml "gopkg.in/yaml.v2" "sigs.k8s.io/structured-merge-diff/v4/schema" "sigs.k8s.io/structured-merge-diff/v4/value" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) // YAMLObject is an object encoded in YAML. diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go index 9be902828..7edaa6d48 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/typed/typed.go @@ -32,6 +32,21 @@ const ( AllowDuplicates ValidationOptions = iota ) +// extractItemsOptions is the options available when extracting items. +type extractItemsOptions struct { + appendKeyFields bool +} + +type ExtractItemsOption func(*extractItemsOptions) + +// WithAppendKeyFields configures ExtractItems to include key fields. +// It is exported for use in configuring ExtractItems. +func WithAppendKeyFields() ExtractItemsOption { + return func(opts *extractItemsOptions) { + opts.appendKeyFields = true + } +} + // AsTyped accepts a value and a type and returns a TypedValue. 'v' must have // type 'typeName' in the schema. An error is returned if the v doesn't conform // to the schema. @@ -187,7 +202,37 @@ func (tv TypedValue) RemoveItems(items *fieldpath.Set) *TypedValue { } // ExtractItems returns a value with only the provided list or map items extracted from the value. 
-func (tv TypedValue) ExtractItems(items *fieldpath.Set) *TypedValue { +func (tv TypedValue) ExtractItems(items *fieldpath.Set, opts ...ExtractItemsOption) *TypedValue { + options := &extractItemsOptions{} + for _, opt := range opts { + opt(options) + } + if options.appendKeyFields { + tvPathSet, err := tv.ToFieldSet() + if err == nil { + keyFieldPathSet := fieldpath.NewSet() + items.Iterate(func(path fieldpath.Path) { + if !tvPathSet.Has(path) { + return + } + for i, pe := range path { + if pe.Key == nil { + continue + } + for _, keyField := range *pe.Key { + keyName := keyField.Name + // Create a new slice with the same elements as path[:i+1], but set its capacity to len(path[:i+1]). + // This ensures that appending to keyFieldPath creates a new underlying array, avoiding accidental + // modification of the original slice (path). + keyFieldPath := append(path[:i+1:i+1], fieldpath.PathElement{FieldName: &keyName}) + keyFieldPathSet.Insert(keyFieldPath) + } + } + }) + items = items.Union(keyFieldPathSet) + } + } + tv.value = removeItemsWithSchema(tv.value, items, tv.schema, tv.typeRef, true) return &tv } diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go index f0d58d42c..88693b87e 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/reflectcache.go @@ -19,7 +19,9 @@ package value import ( "bytes" "encoding/json" + "errors" "fmt" + "io" "reflect" "sort" "sync" @@ -184,6 +186,11 @@ func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, er // This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505 // and is intended to replace it. + // Check if the object is a nil pointer. 
+ if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil, nil + } // Check if the object has a custom string converter and use it if available, since it is much more efficient // than round tripping through json. if converter, ok := e.getUnstructuredConverter(sv); ok { @@ -191,11 +198,6 @@ func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, er } // Check if the object has a custom JSON marshaller/unmarshaller. if marshaler, ok := e.getJsonMarshaler(sv); ok { - if sv.Kind() == reflect.Ptr && sv.IsNil() { - // We're done - we don't need to store anything. - return nil, nil - } - data, err := marshaler.MarshalJSON() if err != nil { return nil, err @@ -379,34 +381,47 @@ const maxDepth = 10000 // unmarshal unmarshals the given data // If v is a *map[string]interface{}, numbers are converted to int64 or float64 func unmarshal(data []byte, v interface{}) error { + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + next := decoder.InputOffset() + if _, err := decoder.Token(); !errors.Is(err, io.EOF) { + tail := bytes.TrimLeft(data[next:], " \t\r\n") + return fmt.Errorf("unexpected trailing data at offset %d", len(data)-len(tail)) + } + + // If the decode succeeds, post-process the object to convert json.Number objects to int64 or float64 switch v := v.(type) { case *map[string]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 return convertMapNumbers(*v, 0) case 
*[]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 return convertSliceNumbers(*v, 0) + case *interface{}: + return convertInterfaceNumbers(v, 0) + default: - return json.Unmarshal(data, v) + return nil + } +} + +func convertInterfaceNumbers(v *interface{}, depth int) error { + var err error + switch v2 := (*v).(type) { + case json.Number: + *v, err = convertNumber(v2) + case map[string]interface{}: + err = convertMapNumbers(v2, depth+1) + case []interface{}: + err = convertSliceNumbers(v2, depth+1) } + return err } // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go index c78a4c18d..5824219e5 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/scalar.go @@ -43,7 +43,7 @@ func IntCompare(lhs, rhs int64) int { func BoolCompare(lhs, rhs bool) int { if lhs == rhs { return 0 - } else if lhs == false { + } else if !lhs { return -1 } return 1 diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go index ea79e3a00..f72e5cd25 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go +++ b/vendor/sigs.k8s.io/structured-merge-diff/v4/value/value.go @@ -23,7 +23,7 @@ import ( "strings" jsoniter "github.com/json-iterator/go" - "gopkg.in/yaml.v2" + yaml "sigs.k8s.io/yaml/goyaml.v2" ) var (