diff --git a/go.mod b/go.mod
index 4f731ebe6cc3..5b8a7601b557 100644
--- a/go.mod
+++ b/go.mod
@@ -32,10 +32,10 @@ require (
 	k8s.io/code-generator v0.27.6
 	k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f
 	k8s.io/utils v0.0.0-20230209194617-a36077c30491
-	knative.dev/caching v0.0.0-20231101191025-c6425778e5ba
-	knative.dev/hack v0.0.0-20231107173840-883479423aaa
-	knative.dev/networking v0.0.0-20231103063604-18529fd26a8b
-	knative.dev/pkg v0.0.0-20231107094615-5c9b7a8d8265
+	knative.dev/caching v0.0.0-20231108204433-b3781bc47aeb
+	knative.dev/hack v0.0.0-20231109190034-5deaddeb51a7
+	knative.dev/networking v0.0.0-20231108061732-e0bee342a97e
+	knative.dev/pkg v0.0.0-20231108014432-35011d423d4b
 	sigs.k8s.io/yaml v1.4.0
 )
diff --git a/go.sum b/go.sum
index 098a33495816..017e50f6135d 100644
--- a/go.sum
+++ b/go.sum
@@ -926,14 +926,14 @@ k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5F
 k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
 k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
 k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/caching v0.0.0-20231101191025-c6425778e5ba h1:wMfEfoiu+yfpKG79k9MUhY4ww8p3YiuqQOt0QfgqMoE=
-knative.dev/caching v0.0.0-20231101191025-c6425778e5ba/go.mod h1:36UniA7tdm8pj9dPAt44dneazLbcQqraNSftnOUYRks=
-knative.dev/hack v0.0.0-20231107173840-883479423aaa h1:XVFqW2a8xvyo231TbVSD3i+580pVej9LbTSmYex6d1g=
-knative.dev/hack v0.0.0-20231107173840-883479423aaa/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
-knative.dev/networking v0.0.0-20231103063604-18529fd26a8b h1:bimYPtsVmXBGjA0dARsoyNFKtREIVceVScnKdsHmqjo=
-knative.dev/networking v0.0.0-20231103063604-18529fd26a8b/go.mod h1:XKuKrS5QCQ3LkPeOYBZqyUxseBIBLpujxttejIBjoCk=
-knative.dev/pkg v0.0.0-20231107094615-5c9b7a8d8265 h1:wFDUSmvSQF48tUCIUIFKMOxq9jpV+vXf5l+RZYxYyt4=
-knative.dev/pkg v0.0.0-20231107094615-5c9b7a8d8265/go.mod h1:P3m1Mg/FJjmr9oFHfWcoUbJLSlBi/hgwakobPxyqrZ4=
+knative.dev/caching v0.0.0-20231108204433-b3781bc47aeb h1:9kuTebXS3SuSxWLGr/5eplg8qu+xn9y/CjFHgVBBo2Q=
+knative.dev/caching v0.0.0-20231108204433-b3781bc47aeb/go.mod h1:owQX47ghEY9OIaxvoTZ9KyJTfEiwLgwY94tyHoUlLUU=
+knative.dev/hack v0.0.0-20231109190034-5deaddeb51a7 h1:HXf7M7n9jwn+Hp904r0HXRSymf+DLXSciFpXVpCg+Bs=
+knative.dev/hack v0.0.0-20231109190034-5deaddeb51a7/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
+knative.dev/networking v0.0.0-20231108061732-e0bee342a97e h1:IFZuDN6IA3lzGt3zVgQ1VbTPSdDcCkdvD0SmxZ3blBM=
+knative.dev/networking v0.0.0-20231108061732-e0bee342a97e/go.mod h1:cu01aODvz01sLC80d7Md6M8pSFi7RMurQnCubeeVH40=
+knative.dev/pkg v0.0.0-20231108014432-35011d423d4b h1:WrDo9M6vkJ4xnTBodWOT2koXjKqr7dOqJH4RWBq4kBw=
+knative.dev/pkg v0.0.0-20231108014432-35011d423d4b/go.mod h1:zkycL49skv31nWKvT1XAsSMFO6mUu33Qhpv0xDvdVGY=
 pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
diff --git a/pkg/apis/serving/v1/revision_helpers.go b/pkg/apis/serving/v1/revision_helpers.go
index e561c7ae6495..4270f94ac518 100644
--- a/pkg/apis/serving/v1/revision_helpers.go
+++ b/pkg/apis/serving/v1/revision_helpers.go
@@ -19,6 +19,7 @@ package v1
 import (
 	"time"
 
+	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	net "knative.dev/networking/pkg/apis/networking"
 	"knative.dev/pkg/kmeta"
@@ -143,3 +144,9 @@ func (rs *RevisionStatus) IsActivationRequired() bool {
 	c := revisionCondSet.Manage(rs).GetCondition(RevisionConditionActive)
 	return c != nil && c.Status != corev1.ConditionTrue
 }
+
+// IsReplicaSetFailure returns true if the Deployment's ReplicaSet failed to be created.
+func (rs *RevisionStatus) IsReplicaSetFailure(deploymentStatus *appsv1.DeploymentStatus) bool {
+	ds := serving.TransformDeploymentStatus(deploymentStatus)
+	return ds != nil && ds.GetCondition(serving.DeploymentConditionReplicaSetReady).IsFalse()
+}
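The new helper's contract is easy to exercise in isolation. A minimal sketch (it mirrors the table-driven unit test added below, and assumes knative.dev/serving is importable as a module):

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	v1 "knative.dev/serving/pkg/apis/serving/v1"
)

func main() {
	rs := &v1.RevisionStatus{}

	// A DeploymentStatus whose ReplicaSet could not be created (e.g. quota
	// exceeded). TransformDeploymentStatus maps the ReplicaFailure condition
	// to DeploymentConditionReplicaSetReady=False, which the helper checks.
	failed := &appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{{
			Type:   appsv1.DeploymentReplicaFailure,
			Status: corev1.ConditionTrue,
		}},
	}
	fmt.Println(rs.IsReplicaSetFailure(failed)) // true

	// An empty status carries no ReplicaFailure condition, so it is not a failure.
	fmt.Println(rs.IsReplicaSetFailure(&appsv1.DeploymentStatus{})) // false
}
```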
"knative.dev/networking/pkg/apis/networking" "knative.dev/pkg/kmeta" @@ -143,3 +144,9 @@ func (rs *RevisionStatus) IsActivationRequired() bool { c := revisionCondSet.Manage(rs).GetCondition(RevisionConditionActive) return c != nil && c.Status != corev1.ConditionTrue } + +// IsReplicaSetFailure returns true if the deployment replicaset failed to create +func (rs *RevisionStatus) IsReplicaSetFailure(deploymentStatus *appsv1.DeploymentStatus) bool { + ds := serving.TransformDeploymentStatus(deploymentStatus) + return ds != nil && ds.GetCondition(serving.DeploymentConditionReplicaSetReady).IsFalse() +} diff --git a/pkg/apis/serving/v1/revision_helpers_test.go b/pkg/apis/serving/v1/revision_helpers_test.go index 65feea02ed5f..dd95c11b4b26 100644 --- a/pkg/apis/serving/v1/revision_helpers_test.go +++ b/pkg/apis/serving/v1/revision_helpers_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -267,3 +268,45 @@ func TestSetRoutingState(t *testing.T) { t.Error("Expected default value for unparsable annotationm but got:", got) } } + +func TestIsReplicaSetFailure(t *testing.T) { + revisionStatus := RevisionStatus{} + cases := []struct { + name string + status appsv1.DeploymentStatus + IsReplicaSetFailure bool + }{{ + name: "empty deployment status should not be a failure", + status: appsv1.DeploymentStatus{}, + }, { + name: "Ready deployment status should not be a failure", + status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue, + }}, + }, + }, { + name: "ReplicasetFailure true should be a failure", + status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentReplicaFailure, Status: corev1.ConditionTrue, + }}, + }, + IsReplicaSetFailure: true, + }, { + name: "ReplicasetFailure false should not be a failure", + status: appsv1.DeploymentStatus{ + Conditions: []appsv1.DeploymentCondition{{ + Type: appsv1.DeploymentReplicaFailure, Status: corev1.ConditionFalse, + }}, + }, + }} + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + if got, want := revisionStatus.IsReplicaSetFailure(&tc.status), tc.IsReplicaSetFailure; got != want { + t.Errorf("IsReplicaSetFailure = %v, want: %v", got, want) + } + }) + } +} diff --git a/pkg/apis/serving/v1/revision_lifecycle.go b/pkg/apis/serving/v1/revision_lifecycle.go index 48d2cbb1fb02..f95c93675aa3 100644 --- a/pkg/apis/serving/v1/revision_lifecycle.go +++ b/pkg/apis/serving/v1/revision_lifecycle.go @@ -51,6 +51,7 @@ const ( ReasonProgressDeadlineExceeded = "ProgressDeadlineExceeded" ) +// RevisionConditionActive is not part of the RevisionConditionSet because we can have Inactive Ready Revisions (scale to zero) var revisionCondSet = apis.NewLivingConditionSet( RevisionConditionResourcesAvailable, RevisionConditionContainerHealthy, @@ -171,7 +172,6 @@ func (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentS func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodAutoscalerStatus) { // Reflect the PA status in our own. 
diff --git a/pkg/reconciler/revision/reconcile_resources.go b/pkg/reconciler/revision/reconcile_resources.go
index 28a0eac64ef6..1bef83c102b0 100644
--- a/pkg/reconciler/revision/reconcile_resources.go
+++ b/pkg/reconciler/revision/reconcile_resources.go
@@ -79,6 +79,12 @@ func (c *Reconciler) reconcileDeployment(ctx context.Context, rev *v1.Revision)
 		}
 	}
 
+	// If the ReplicaSet is failing, we assume it's an error we have to surface.
+	if rev.Status.IsReplicaSetFailure(&deployment.Status) {
+		rev.Status.PropagateDeploymentStatus(&deployment.Status)
+		return nil
+	}
+
 	// If a container keeps crashing (no active pods in the deployment although we want some)
 	if *deployment.Spec.Replicas > 0 && deployment.Status.AvailableReplicas == 0 {
 		pods, err := c.kubeclient.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(deployment.Spec.Selector)})
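The early return relies on PropagateDeploymentStatus to copy the failure's reason and message onto the Revision's ResourcesAvailable condition. A sketch of the expected end-to-end effect (the FailedCreate reason/message match the table test below; this assumes propagation preserves them, as that test asserts):

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	v1 "knative.dev/serving/pkg/apis/serving/v1"
)

func main() {
	rev := &v1.Revision{}
	rev.Status.InitializeConditions()

	// A Deployment whose ReplicaSet creation failed, as Kubernetes reports it.
	ds := &appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{{
			Type:    appsv1.DeploymentReplicaFailure,
			Status:  corev1.ConditionTrue,
			Reason:  "FailedCreate",
			Message: "I ReplicaSet failed!",
		}},
	}

	// The reconciler short-circuits here: the failure is propagated to the
	// Revision's ResourcesAvailable condition instead of being retried.
	if rev.Status.IsReplicaSetFailure(ds) {
		rev.Status.PropagateDeploymentStatus(ds)
	}

	c := rev.Status.GetCondition(v1.RevisionConditionResourcesAvailable)
	fmt.Println(c.Status, c.Reason, c.Message) // expected: False FailedCreate I ReplicaSet failed!
}
```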
diff --git a/pkg/reconciler/revision/table_test.go b/pkg/reconciler/revision/table_test.go
index 84ace3c50844..26ed83d48d0b 100644
--- a/pkg/reconciler/revision/table_test.go
+++ b/pkg/reconciler/revision/table_test.go
@@ -471,6 +471,34 @@ func TestReconcile(t *testing.T) {
 			Object: pa("foo", "deploy-timeout", WithReachabilityReachable),
 		}},
 		Key: "foo/deploy-timeout",
+	}, {
+		Name: "revision failure because replicaset and deployment failed",
+		// This test defines a Revision InProgress with status Deploying, but with a
+		// Deployment that has a ReplicaSet failure, so the expected status update
+		// surfaces the Deployment's FailedCreate error on the Revision.
+		Objects: []runtime.Object{
+			Revision("foo", "deploy-replicaset-failure",
+				WithLogURL, MarkActivating("Deploying", ""),
+				WithRoutingState(v1.RoutingStateActive, fc),
+				withDefaultContainerStatuses(),
+				WithRevisionObservedGeneration(1),
+				MarkContainerHealthyUnknown("Deploying"),
+			),
+			pa("foo", "deploy-replicaset-failure", WithReachabilityUnreachable),
+			replicaFailureDeploy(deploy(t, "foo", "deploy-replicaset-failure"), "I ReplicaSet failed!"),
+			image("foo", "deploy-replicaset-failure"),
+		},
+		WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
+			Object: Revision("foo", "deploy-replicaset-failure",
+				WithLogURL, MarkResourcesUnavailable("FailedCreate", "I ReplicaSet failed!"),
+				withDefaultContainerStatuses(),
+				WithRoutingState(v1.RoutingStateActive, fc),
+				MarkContainerHealthyUnknown("Deploying"),
+				WithRevisionObservedGeneration(1),
+				MarkActivating("Deploying", ""),
+			),
+		}},
+		Key: "foo/deploy-replicaset-failure",
 	}, {
 		Name: "surface replica failure",
 		// Test the propagation of FailedCreate from Deployment.
diff --git a/pkg/testing/v1/revision.go b/pkg/testing/v1/revision.go
index 19fa8a2705ea..a3cd452fd9ef 100644
--- a/pkg/testing/v1/revision.go
+++ b/pkg/testing/v1/revision.go
@@ -152,6 +152,13 @@ func MarkDeploying(reason string) RevisionOption {
 	}
 }
 
+// MarkContainerHealthyUnknown calls the method of the same name on the Revision
+// with the given reason.
+func MarkContainerHealthyUnknown(reason string) RevisionOption {
+	return func(r *v1.Revision) {
+		r.Status.MarkContainerHealthyUnknown(reason, "")
+	}
+}
+
 // MarkProgressDeadlineExceeded calls the method of the same name on the Revision
 // with the message we expect the Revision Reconciler to pass.
 func MarkProgressDeadlineExceeded(message string) RevisionOption {
diff --git a/test/e2e/resource_quota_error_test.go b/test/e2e/resource_quota_error_test.go
index 690de7aaa241..6c433ba02b12 100644
--- a/test/e2e/resource_quota_error_test.go
+++ b/test/e2e/resource_quota_error_test.go
@@ -40,10 +40,11 @@ func TestResourceQuotaError(t *testing.T) {
 	clients := test.Setup(t, test.Options{Namespace: "rq-test"})
 
 	const (
-		errorReason    = "RevisionFailed"
-		errorMsgScale  = "Initial scale was never achieved"
-		errorMsgQuota  = "forbidden: exceeded quota"
-		revisionReason = "ProgressDeadlineExceeded"
+		errorReason            = "RevisionFailed"
+		progressDeadlineReason = "ProgressDeadlineExceeded"
+		waitReason             = "ContainerCreating"
+		errorMsgQuota          = "forbidden: exceeded quota"
+		revisionReason         = "RevisionFailed"
 	)
 	names := test.ResourceNames{
 		Service: test.ObjectNameForTest(t),
@@ -80,16 +81,12 @@ func TestResourceQuotaError(t *testing.T) {
 	err = v1test.WaitForServiceState(clients.ServingClient, names.Service, func(r *v1.Service) (bool, error) {
 		cond = r.Status.GetCondition(v1.ServiceConditionConfigurationsReady)
 		if cond != nil && !cond.IsUnknown() {
-			// Can fail with either a progress deadline exceeded error or an exceeded resource quota error
-			if strings.Contains(cond.Message, errorMsgScale) && cond.IsFalse() {
-				return true, nil
-			}
-			if strings.Contains(cond.Message, errorMsgQuota) && cond.IsFalse() {
+			if cond.Reason == errorReason && cond.IsFalse() {
 				return true, nil
 			}
 			t.Logf("Reason: %s ; Message: %s ; Status: %s", cond.Reason, cond.Message, cond.Status)
 			return true, fmt.Errorf("the service %s was not marked with expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)",
-				names.Config, errorReason, errorMsgScale, "False", cond.Reason, cond.Message, cond.Status)
+				names.Config, errorReason, "", "False", cond.Reason, cond.Message, cond.Status)
 		}
 		return false, nil
 	}, "ContainerUnscheduleable")
@@ -113,15 +110,19 @@ func TestResourceQuotaError(t *testing.T) {
 	err = v1test.CheckRevisionState(clients.ServingClient, revisionName, func(r *v1.Revision) (bool, error) {
 		cond := r.Status.GetCondition(v1.RevisionConditionReady)
 		if cond != nil {
-			// Can fail with either a progress deadline exceeded error or an exceeded resource quota error
-			if cond.Reason == revisionReason && strings.Contains(cond.Message, errorMsgScale) {
+			if strings.Contains(cond.Message, errorMsgQuota) && cond.IsFalse() {
 				return true, nil
 			}
-			if strings.Contains(cond.Message, errorMsgQuota) && cond.IsFalse() {
+			// Can also fail with a progress deadline exceeded error.
+			if cond.Reason == progressDeadlineReason {
 				return true, nil
 			}
+			// Keep polling while the container is still being created.
+			if cond.Reason == waitReason {
+				return false, nil
+			}
 			return true, fmt.Errorf("the revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)",
-				revisionName, revisionReason, errorMsgScale, cond.Reason, cond.Message)
+				revisionName, revisionReason, errorMsgQuota, cond.Reason, cond.Message)
 		}
 		return false, nil
 	})
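The predicates handed to WaitForServiceState/CheckRevisionState follow a tri-state protocol: (false, nil) keeps polling, (true, nil) succeeds, and a non-nil error aborts. A standard-library-only sketch of that protocol (pollUntil is a hypothetical stand-in for the test helpers, not their actual implementation):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// pollUntil drives a predicate the way the e2e helpers do:
// (false, nil) keeps polling, (true, nil) succeeds, (_, err) aborts.
func pollUntil(ctx context.Context, interval time.Duration, pred func() (bool, error)) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		done, err := pred()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}

func main() {
	attempts := 0
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	err := pollUntil(ctx, 100*time.Millisecond, func() (bool, error) {
		attempts++
		switch {
		case attempts < 3: // e.g. Reason == "ContainerCreating": keep waiting
			return false, nil
		case attempts == 3: // e.g. quota message matched: success
			return true, nil
		default: // unexpected condition: abort with an error
			return true, errors.New("unexpected condition")
		}
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```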
app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving serving.knative.dev/controller: "true" networking.knative.dev/certificate-provider: cert-manager @@ -52,7 +52,7 @@ metadata: name: config.webhook.net-certmanager.networking.internal.knative.dev labels: app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager webhooks: @@ -93,7 +93,7 @@ metadata: namespace: knative-serving labels: app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager @@ -119,7 +119,7 @@ metadata: namespace: knative-serving labels: app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager data: @@ -178,7 +178,7 @@ metadata: namespace: knative-serving labels: app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager spec: @@ -190,7 +190,7 @@ spec: labels: app: net-certmanager-controller app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving spec: serviceAccountName: controller @@ -198,7 +198,7 @@ spec: - name: controller # This is the Go import path for the binary that is containerized # and substituted here. - image: gcr.io/knative-nightly/knative.dev/net-certmanager/cmd/controller@sha256:93daea216a5ad09d597114c7749e0de33745f1941fe31eb7cea6d76e27b02d24 + image: gcr.io/knative-nightly/knative.dev/net-certmanager/cmd/controller@sha256:c871cd1202050c852102d33de1b4692a11fc042423995a1e7445d770752e1f1d resources: requests: cpu: 30m @@ -239,7 +239,7 @@ metadata: labels: app: net-certmanager-controller app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager name: net-certmanager-controller @@ -277,7 +277,7 @@ metadata: name: selfsigned-cluster-issuer labels: app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager spec: @@ -289,7 +289,7 @@ metadata: name: knative-internal-encryption-issuer labels: app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager spec: @@ -303,7 +303,7 @@ metadata: namespace: cert-manager # If you want to use it as a ClusterIssuer the secret must be in the cert-manager namespace. 
labels: app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager spec: @@ -338,7 +338,7 @@ metadata: namespace: knative-serving labels: app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager spec: @@ -351,7 +351,7 @@ spec: labels: app: net-certmanager-webhook app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving role: net-certmanager-webhook spec: @@ -360,7 +360,7 @@ spec: - name: webhook # This is the Go import path for the binary that is containerized # and substituted here. - image: gcr.io/knative-nightly/knative.dev/net-certmanager/cmd/webhook@sha256:7aa5e2077da3f3c4bf026db19a673910b3b016ba9ce0a4482c506e13d461ee7e + image: gcr.io/knative-nightly/knative.dev/net-certmanager/cmd/webhook@sha256:5d3ae5ae850e2448107eb295ec90ee1a4ad102aeb3ded453a94e1c83b4d626b3 resources: requests: cpu: 20m @@ -426,7 +426,7 @@ metadata: labels: role: net-certmanager-webhook app.kubernetes.io/component: net-certmanager - app.kubernetes.io/version: "20231108-11e6219e" + app.kubernetes.io/version: "20231110-8b2a470c" app.kubernetes.io/name: knative-serving networking.knative.dev/certificate-provider: cert-manager spec: diff --git a/third_party/contour-latest/net-contour.yaml b/third_party/contour-latest/net-contour.yaml index 1e9de3af2440..bbefadd915fd 100644 --- a/third_party/contour-latest/net-contour.yaml +++ b/third_party/contour-latest/net-contour.yaml @@ -8,7 +8,7 @@ metadata: networking.knative.dev/ingress-provider: contour app.kubernetes.io/component: net-contour app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231109-467a573d" + app.kubernetes.io/version: "20231110-d2054f2c" serving.knative.dev/controller: "true" rules: - apiGroups: ["projectcontour.io"] @@ -38,7 +38,7 @@ metadata: networking.knative.dev/ingress-provider: contour app.kubernetes.io/component: net-contour app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231109-467a573d" + app.kubernetes.io/version: "20231110-d2054f2c" data: _example: | ################################ @@ -95,7 +95,7 @@ metadata: networking.knative.dev/ingress-provider: contour app.kubernetes.io/component: net-contour app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231109-467a573d" + app.kubernetes.io/version: "20231110-d2054f2c" spec: replicas: 1 selector: @@ -107,14 +107,14 @@ spec: app: net-contour-controller app.kubernetes.io/component: net-contour app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231109-467a573d" + app.kubernetes.io/version: "20231110-d2054f2c" spec: serviceAccountName: controller containers: - name: controller # This is the Go import path for the binary that is containerized # and substituted here. 
- image: gcr.io/knative-nightly/knative.dev/net-contour/cmd/controller@sha256:a9792e2245a2c017d3a0a20c86f2dbd393e27e8cdb79e1659f86544bc3599992 + image: gcr.io/knative-nightly/knative.dev/net-contour/cmd/controller@sha256:8a72ebc1bfaaa352602484bf3f0c3596e1feda88dad141a7d219caddf2b5c6d6 resources: requests: cpu: 40m @@ -158,7 +158,7 @@ metadata: networking.knative.dev/ingress-provider: contour app.kubernetes.io/component: net-contour app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231109-467a573d" + app.kubernetes.io/version: "20231110-d2054f2c" spec: delegations: - secretName: knative-serving-certs diff --git a/third_party/gateway-api-latest/istio-gateway.yaml b/third_party/gateway-api-latest/istio-gateway.yaml index ea21b0f82e9b..95d8748da1b1 100644 --- a/third_party/gateway-api-latest/istio-gateway.yaml +++ b/third_party/gateway-api-latest/istio-gateway.yaml @@ -22,7 +22,7 @@ metadata: labels: app.kubernetes.io/component: net-gateway-api app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231103-a8d56a3a" + app.kubernetes.io/version: "20231113-29bf0b93" experimental.istio.io/disable-gateway-port-translation: "true" spec: type: ClusterIP diff --git a/third_party/gateway-api-latest/net-gateway-api.yaml b/third_party/gateway-api-latest/net-gateway-api.yaml index 2bcf4fff69d9..fd9d6a364fe3 100644 --- a/third_party/gateway-api-latest/net-gateway-api.yaml +++ b/third_party/gateway-api-latest/net-gateway-api.yaml @@ -20,7 +20,7 @@ metadata: networking.knative.dev/ingress-provider: net-gateway-api app.kubernetes.io/component: net-gateway-api app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231103-a8d56a3a" + app.kubernetes.io/version: "20231113-29bf0b93" aggregationRule: clusterRoleSelectors: - matchLabels: @@ -36,7 +36,7 @@ metadata: networking.knative.dev/ingress-provider: net-gateway-api app.kubernetes.io/component: net-gateway-api app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231103-a8d56a3a" + app.kubernetes.io/version: "20231113-29bf0b93" rules: - apiGroups: ["gateway.networking.k8s.io"] resources: ["httproutes", "referencegrants", "referencepolicies"] @@ -69,7 +69,7 @@ metadata: networking.knative.dev/ingress-provider: net-gateway-api app.kubernetes.io/component: net-gateway-api app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231103-a8d56a3a" + app.kubernetes.io/version: "20231113-29bf0b93" data: _example: | ################################ @@ -123,7 +123,7 @@ metadata: labels: networking.knative.dev/ingress-provider: net-gateway-api app.kubernetes.io/component: net-gateway-api - app.kubernetes.io/version: "20231103-a8d56a3a" + app.kubernetes.io/version: "20231113-29bf0b93" app.kubernetes.io/name: knative-serving spec: replicas: 1 @@ -150,7 +150,7 @@ spec: - name: controller # This is the Go import path for the binary that is containerized # and substituted here. 
- image: gcr.io/knative-nightly/knative.dev/net-gateway-api/cmd/controller@sha256:566aa97879937828f5a47691125844f9e3ee26619793e53dc7ddb6f807c4d9db + image: gcr.io/knative-nightly/knative.dev/net-gateway-api/cmd/controller@sha256:04a3416b0e887d2949f5c2b1b1d5507c1b6e785f358f5f7d84c409f160da8ea0 resources: requests: cpu: 100m diff --git a/third_party/istio-latest/net-istio.yaml b/third_party/istio-latest/net-istio.yaml index 3f32e3b03f84..7a9464e2ecb4 100644 --- a/third_party/istio-latest/net-istio.yaml +++ b/third_party/istio-latest/net-istio.yaml @@ -1,4 +1,4 @@ -# Generated when HEAD was 7f77e97abb1d183af8bbca5e6b29796247118bc2 +# Generated when HEAD was e3db912dec6da0e3b9c663a573862be02131f311 # # Copyright 2019 The Knative Authors # @@ -22,7 +22,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" serving.knative.dev/controller: "true" networking.knative.dev/ingress-provider: istio rules: @@ -54,7 +54,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio spec: selector: @@ -93,7 +93,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio spec: selector: @@ -114,7 +114,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio experimental.istio.io/disable-gateway-port-translation: "true" spec: @@ -149,7 +149,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio data: # TODO(nghia): Extract the .svc.cluster.local suffix into its own config. @@ -203,7 +203,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio spec: selector: @@ -221,7 +221,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio spec: selector: @@ -254,7 +254,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio spec: selector: @@ -271,14 +271,14 @@ spec: app: net-istio-controller app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" spec: serviceAccountName: controller containers: - name: controller # This is the Go import path for the binary that is containerized # and substituted here. 
- image: gcr.io/knative-nightly/knative.dev/net-istio/cmd/controller@sha256:197b4cdefbf8a4e3a863c0715744a627075943e6273c9b5f0922f5a03c85f134 + image: gcr.io/knative-nightly/knative.dev/net-istio/cmd/controller@sha256:a19396c1ad746722dd499fa7d24188aac657a1ea9c56c4babd29041245bb8906 resources: requests: cpu: 30m @@ -357,7 +357,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio spec: selector: @@ -371,14 +371,14 @@ spec: role: net-istio-webhook app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" spec: serviceAccountName: controller containers: - name: webhook # This is the Go import path for the binary that is containerized # and substituted here. - image: gcr.io/knative-nightly/knative.dev/net-istio/cmd/webhook@sha256:1ad610463d21757ebb5dd34026112fa53f54c8ea8216f0e49a19ed28621bfc80 + image: gcr.io/knative-nightly/knative.dev/net-istio/cmd/webhook@sha256:8b74e6a48e06f44260785a23c5afd577877db07f9fa86557e7ba1a233908723e resources: requests: cpu: 20m @@ -462,7 +462,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio --- @@ -489,7 +489,7 @@ metadata: role: net-istio-webhook app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio spec: ports: @@ -528,7 +528,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio webhooks: - admissionReviewVersions: @@ -567,7 +567,7 @@ metadata: labels: app.kubernetes.io/component: net-istio app.kubernetes.io/name: knative-serving - app.kubernetes.io/version: "20231106-7f77e97a" + app.kubernetes.io/version: "20231110-e3db912d" networking.knative.dev/ingress-provider: istio webhooks: - admissionReviewVersions: diff --git a/third_party/kourier-latest/kourier.yaml b/third_party/kourier-latest/kourier.yaml index 172684ed7461..dfbabaa5ea36 100644 --- a/third_party/kourier-latest/kourier.yaml +++ b/third_party/kourier-latest/kourier.yaml @@ -20,7 +20,7 @@ metadata: networking.knative.dev/ingress-provider: kourier app.kubernetes.io/name: knative-serving app.kubernetes.io/component: net-kourier - app.kubernetes.io/version: "20231107-6e4d79d3" + app.kubernetes.io/version: "20231110-ad58d905" --- # Copyright 2020 The Knative Authors @@ -45,7 +45,7 @@ metadata: labels: networking.knative.dev/ingress-provider: kourier app.kubernetes.io/component: net-kourier - app.kubernetes.io/version: "20231107-6e4d79d3" + app.kubernetes.io/version: "20231110-ad58d905" app.kubernetes.io/name: knative-serving data: envoy-bootstrap.yaml: | @@ -168,7 +168,7 @@ metadata: labels: networking.knative.dev/ingress-provider: kourier app.kubernetes.io/component: net-kourier - app.kubernetes.io/version: "20231107-6e4d79d3" + app.kubernetes.io/version: "20231110-ad58d905" app.kubernetes.io/name: knative-serving data: _example: | @@ -248,7 +248,7 
diff --git a/vendor/knative.dev/hack/release.sh b/vendor/knative.dev/hack/release.sh
index 68e75e9ca0dc..256ae40d0eab 100644
--- a/vendor/knative.dev/hack/release.sh
+++ b/vendor/knative.dev/hack/release.sh
@@ -658,13 +658,18 @@ function publish_artifacts() {
 # Sets the github release with the highest semver to 'latest'
 function set_latest_to_highest_semver() {
-  local last_version # don't combine with the line below, or $? will be 0
+  if ! (( PUBLISH_TO_GITHUB )); then
+    return 0
+  fi
+  echo "Setting latest release to highest semver"
+
+  local last_version release_id # don't combine the declaration with the assignment, or $? will be 0
+
   last_version="$(hub_tool -p release | cut -d'-' -f2 | grep '^v[0-9]\+\.[0-9]\+\.[0-9]\+$'| sort -r -V | head -1)"
   if ! [[ $? -eq 0 ]]; then
     abort "cannot list releases"
   fi
 
-  local release_id # don't combine with the line below, or $? will be 0
   release_id="$(hub_tool api "/repos/${ORG_NAME}/${REPO_NAME}/releases/tags/knative-${last_version}" | jq .id)"
   if [[ $? -ne 0 ]]; then
     abort "cannot get release id from github"
   fi
@@ -701,6 +706,8 @@ function main() {
   function_exists build_release || abort "function 'build_release()' not defined"
   [[ -x ${VALIDATION_TESTS} ]] || abort "test script '${VALIDATION_TESTS}' doesn't exist"
 
+  banner "Environment variables"
+  env
   # Log what will be done and where.
   banner "Release configuration"
   if which gcloud &>/dev/null ; then
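set_latest_to_highest_semver picks the newest release by filtering tags to plain vX.Y.Z versions and version-sorting them (`grep '^v[0-9]\+\.[0-9]\+\.[0-9]\+$' | sort -r -V | head -1`). An equivalent sketch of that selection logic in Go, using golang.org/x/mod/semver (the script itself is bash; this is only an illustration):

```go
package main

import (
	"fmt"
	"regexp"
	"sort"

	"golang.org/x/mod/semver"
)

// highestSemver mirrors the grep/sort -V/head -1 pipeline: it keeps only
// plain vX.Y.Z tags and returns the highest one, or "" if none match.
func highestSemver(tags []string) string {
	re := regexp.MustCompile(`^v[0-9]+\.[0-9]+\.[0-9]+$`)
	versions := make([]string, 0, len(tags))
	for _, t := range tags {
		if re.MatchString(t) {
			versions = append(versions, t)
		}
	}
	if len(versions) == 0 {
		return ""
	}
	sort.Slice(versions, func(i, j int) bool {
		return semver.Compare(versions[i], versions[j]) > 0
	})
	return versions[0]
}

func main() {
	// Pre-release tags like v1.12.0-rc1 are filtered out, just as the grep does.
	tags := []string{"v1.11.3", "v1.12.0", "v1.9.9", "v1.12.0-rc1"}
	fmt.Println(highestSemver(tags)) // v1.12.0
}
```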
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f2924fc6ca9f..1ae858b849e7 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1277,7 +1277,7 @@ k8s.io/utils/net
 k8s.io/utils/pointer
 k8s.io/utils/strings/slices
 k8s.io/utils/trace
-# knative.dev/caching v0.0.0-20231101191025-c6425778e5ba
+# knative.dev/caching v0.0.0-20231108204433-b3781bc47aeb
 ## explicit; go 1.18
 knative.dev/caching/config
 knative.dev/caching/pkg/apis/caching
@@ -1298,10 +1298,10 @@ knative.dev/caching/pkg/client/injection/informers/caching/v1alpha1/image/fake
 knative.dev/caching/pkg/client/injection/informers/factory
 knative.dev/caching/pkg/client/injection/informers/factory/fake
 knative.dev/caching/pkg/client/listers/caching/v1alpha1
-# knative.dev/hack v0.0.0-20231107173840-883479423aaa
+# knative.dev/hack v0.0.0-20231109190034-5deaddeb51a7
 ## explicit; go 1.18
 knative.dev/hack
-# knative.dev/networking v0.0.0-20231103063604-18529fd26a8b
+# knative.dev/networking v0.0.0-20231108061732-e0bee342a97e
 ## explicit; go 1.18
 knative.dev/networking/config
 knative.dev/networking/pkg
@@ -1340,7 +1340,7 @@ knative.dev/networking/pkg/http/stats
 knative.dev/networking/pkg/ingress
 knative.dev/networking/pkg/k8s
 knative.dev/networking/pkg/prober
-# knative.dev/pkg v0.0.0-20231107094615-5c9b7a8d8265
+# knative.dev/pkg v0.0.0-20231108014432-35011d423d4b
 ## explicit; go 1.18
 knative.dev/pkg/apiextensions/storageversion
 knative.dev/pkg/apiextensions/storageversion/cmd/migrate