Merge branch 'knative:main' into issue-12691
andrew-delph authored Nov 13, 2023
2 parents 97d117f + d3127e9 commit f716986
Showing 17 changed files with 190 additions and 92 deletions.
8 changes: 4 additions & 4 deletions go.mod
@@ -32,10 +32,10 @@ require (
k8s.io/code-generator v0.27.6
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f
k8s.io/utils v0.0.0-20230209194617-a36077c30491
knative.dev/caching v0.0.0-20231101191025-c6425778e5ba
knative.dev/hack v0.0.0-20231107173840-883479423aaa
knative.dev/networking v0.0.0-20231103063604-18529fd26a8b
knative.dev/pkg v0.0.0-20231107094615-5c9b7a8d8265
knative.dev/caching v0.0.0-20231108204433-b3781bc47aeb
knative.dev/hack v0.0.0-20231109190034-5deaddeb51a7
knative.dev/networking v0.0.0-20231108061732-e0bee342a97e
knative.dev/pkg v0.0.0-20231108014432-35011d423d4b
sigs.k8s.io/yaml v1.4.0
)

16 changes: 8 additions & 8 deletions go.sum
@@ -926,14 +926,14 @@ k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f h1:2kWPakN3i/k81b0gvD5C5F
k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg=
k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY=
k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/caching v0.0.0-20231101191025-c6425778e5ba h1:wMfEfoiu+yfpKG79k9MUhY4ww8p3YiuqQOt0QfgqMoE=
knative.dev/caching v0.0.0-20231101191025-c6425778e5ba/go.mod h1:36UniA7tdm8pj9dPAt44dneazLbcQqraNSftnOUYRks=
knative.dev/hack v0.0.0-20231107173840-883479423aaa h1:XVFqW2a8xvyo231TbVSD3i+580pVej9LbTSmYex6d1g=
knative.dev/hack v0.0.0-20231107173840-883479423aaa/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/networking v0.0.0-20231103063604-18529fd26a8b h1:bimYPtsVmXBGjA0dARsoyNFKtREIVceVScnKdsHmqjo=
knative.dev/networking v0.0.0-20231103063604-18529fd26a8b/go.mod h1:XKuKrS5QCQ3LkPeOYBZqyUxseBIBLpujxttejIBjoCk=
knative.dev/pkg v0.0.0-20231107094615-5c9b7a8d8265 h1:wFDUSmvSQF48tUCIUIFKMOxq9jpV+vXf5l+RZYxYyt4=
knative.dev/pkg v0.0.0-20231107094615-5c9b7a8d8265/go.mod h1:P3m1Mg/FJjmr9oFHfWcoUbJLSlBi/hgwakobPxyqrZ4=
knative.dev/caching v0.0.0-20231108204433-b3781bc47aeb h1:9kuTebXS3SuSxWLGr/5eplg8qu+xn9y/CjFHgVBBo2Q=
knative.dev/caching v0.0.0-20231108204433-b3781bc47aeb/go.mod h1:owQX47ghEY9OIaxvoTZ9KyJTfEiwLgwY94tyHoUlLUU=
knative.dev/hack v0.0.0-20231109190034-5deaddeb51a7 h1:HXf7M7n9jwn+Hp904r0HXRSymf+DLXSciFpXVpCg+Bs=
knative.dev/hack v0.0.0-20231109190034-5deaddeb51a7/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/networking v0.0.0-20231108061732-e0bee342a97e h1:IFZuDN6IA3lzGt3zVgQ1VbTPSdDcCkdvD0SmxZ3blBM=
knative.dev/networking v0.0.0-20231108061732-e0bee342a97e/go.mod h1:cu01aODvz01sLC80d7Md6M8pSFi7RMurQnCubeeVH40=
knative.dev/pkg v0.0.0-20231108014432-35011d423d4b h1:WrDo9M6vkJ4xnTBodWOT2koXjKqr7dOqJH4RWBq4kBw=
knative.dev/pkg v0.0.0-20231108014432-35011d423d4b/go.mod h1:zkycL49skv31nWKvT1XAsSMFO6mUu33Qhpv0xDvdVGY=
pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
7 changes: 7 additions & 0 deletions pkg/apis/serving/v1/revision_helpers.go
@@ -19,6 +19,7 @@ package v1
import (
"time"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
net "knative.dev/networking/pkg/apis/networking"
"knative.dev/pkg/kmeta"
@@ -143,3 +144,9 @@ func (rs *RevisionStatus) IsActivationRequired() bool {
c := revisionCondSet.Manage(rs).GetCondition(RevisionConditionActive)
return c != nil && c.Status != corev1.ConditionTrue
}

// IsReplicaSetFailure returns true if the Deployment's ReplicaSet failed to be created
func (rs *RevisionStatus) IsReplicaSetFailure(deploymentStatus *appsv1.DeploymentStatus) bool {
ds := serving.TransformDeploymentStatus(deploymentStatus)
return ds != nil && ds.GetCondition(serving.DeploymentConditionReplicaSetReady).IsFalse()
}
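
For context, a minimal usage sketch of the new helper, mirroring the unit test added below in revision_helpers_test.go; the standalone main wrapper and import aliases are illustrative assumptions rather than part of this commit:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	v1 "knative.dev/serving/pkg/apis/serving/v1"
)

func main() {
	rs := v1.RevisionStatus{}

	// A DeploymentStatus whose ReplicaSet could not be created.
	failed := appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{{
			Type:   appsv1.DeploymentReplicaFailure,
			Status: corev1.ConditionTrue,
		}},
	}

	fmt.Println(rs.IsReplicaSetFailure(&failed))                    // true
	fmt.Println(rs.IsReplicaSetFailure(&appsv1.DeploymentStatus{})) // false
}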
43 changes: 43 additions & 0 deletions pkg/apis/serving/v1/revision_helpers_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"time"

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"

@@ -267,3 +268,45 @@ func TestSetRoutingState(t *testing.T) {
t.Error("Expected default value for unparsable annotationm but got:", got)
}
}

func TestIsReplicaSetFailure(t *testing.T) {
revisionStatus := RevisionStatus{}
cases := []struct {
name string
status appsv1.DeploymentStatus
IsReplicaSetFailure bool
}{{
name: "empty deployment status should not be a failure",
status: appsv1.DeploymentStatus{},
}, {
name: "Ready deployment status should not be a failure",
status: appsv1.DeploymentStatus{
Conditions: []appsv1.DeploymentCondition{{
Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue,
}},
},
}, {
name: "ReplicasetFailure true should be a failure",
status: appsv1.DeploymentStatus{
Conditions: []appsv1.DeploymentCondition{{
Type: appsv1.DeploymentReplicaFailure, Status: corev1.ConditionTrue,
}},
},
IsReplicaSetFailure: true,
}, {
name: "ReplicasetFailure false should not be a failure",
status: appsv1.DeploymentStatus{
Conditions: []appsv1.DeploymentCondition{{
Type: appsv1.DeploymentReplicaFailure, Status: corev1.ConditionFalse,
}},
},
}}

for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
if got, want := revisionStatus.IsReplicaSetFailure(&tc.status), tc.IsReplicaSetFailure; got != want {
t.Errorf("IsReplicaSetFailure = %v, want: %v", got, want)
}
})
}
}
2 changes: 1 addition & 1 deletion pkg/apis/serving/v1/revision_lifecycle.go
@@ -51,6 +51,7 @@ const (
ReasonProgressDeadlineExceeded = "ProgressDeadlineExceeded"
)

// RevisionConditionActive is not part of the RevisionConditionSet because we can have Inactive Ready Revisions (scale to zero)
var revisionCondSet = apis.NewLivingConditionSet(
RevisionConditionResourcesAvailable,
RevisionConditionContainerHealthy,
@@ -171,7 +172,6 @@ func (rs *RevisionStatus) PropagateDeploymentStatus(original *appsv1.DeploymentS
func (rs *RevisionStatus) PropagateAutoscalerStatus(ps *autoscalingv1alpha1.PodAutoscalerStatus) {
// Reflect the PA status in our own.
cond := ps.GetCondition(autoscalingv1alpha1.PodAutoscalerConditionReady)

rs.ActualReplicas = nil
if ps.ActualScale != nil && *ps.ActualScale >= 0 {
rs.ActualReplicas = ps.ActualScale
6 changes: 6 additions & 0 deletions pkg/reconciler/revision/reconcile_resources.go
@@ -79,6 +79,12 @@ func (c *Reconciler) reconcileDeployment(ctx context.Context, rev *v1.Revision)
}
}

// If the ReplicaSet is failing, we assume it's an error we have to surface
if rev.Status.IsReplicaSetFailure(&deployment.Status) {
rev.Status.PropagateDeploymentStatus(&deployment.Status)
return nil
}

// If a container keeps crashing (no active pods in the deployment although we want some)
if *deployment.Spec.Replicas > 0 && deployment.Status.AvailableReplicas == 0 {
pods, err := c.kubeclient.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{LabelSelector: metav1.FormatLabelSelector(deployment.Spec.Selector)})
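
To illustrate the effect of the early return added above, here is a rough sketch of how such a failure is expected to surface on the Revision, based on the expectations encoded in the table test below (the reason and message values come from that test; this snippet is not part of the commit):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	v1 "knative.dev/serving/pkg/apis/serving/v1"
)

func main() {
	rev := v1.Revision{}
	rev.Status.InitializeConditions()

	// Deployment whose ReplicaSet creation failed, e.g. rejected by a ResourceQuota.
	ds := appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{{
			Type:    appsv1.DeploymentReplicaFailure,
			Status:  corev1.ConditionTrue,
			Reason:  "FailedCreate",
			Message: "I ReplicaSet failed!",
		}},
	}

	// The reconciler now short-circuits on a ReplicaSet failure and propagates
	// the Deployment status instead of waiting for the progress deadline.
	if rev.Status.IsReplicaSetFailure(&ds) {
		rev.Status.PropagateDeploymentStatus(&ds)
	}

	cond := rev.Status.GetCondition(v1.RevisionConditionResourcesAvailable)
	fmt.Println(cond.Status, cond.Reason, cond.Message)
	// Expected (per the table test): False FailedCreate I ReplicaSet failed!
}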
28 changes: 28 additions & 0 deletions pkg/reconciler/revision/table_test.go
@@ -471,6 +471,34 @@ func TestReconcile(t *testing.T) {
Object: pa("foo", "deploy-timeout", WithReachabilityReachable),
}},
Key: "foo/deploy-timeout",
}, {
Name: "revision failure because replicaset and deployment failed",
// This test defines a Revision that is still deploying (status Deploying) whose
// Deployment has a ReplicaSet failure, so the expected status update is the
// Deployment's FailedCreate error bubbling up to the Revision.
Objects: []runtime.Object{
Revision("foo", "deploy-replicaset-failure",
WithLogURL, MarkActivating("Deploying", ""),
WithRoutingState(v1.RoutingStateActive, fc),
withDefaultContainerStatuses(),
WithRevisionObservedGeneration(1),
MarkContainerHealthyUnknown("Deploying"),
),
pa("foo", "deploy-replicaset-failure", WithReachabilityUnreachable),
replicaFailureDeploy(deploy(t, "foo", "deploy-replicaset-failure"), "I ReplicaSet failed!"),
image("foo", "deploy-replicaset-failure"),
},
WantStatusUpdates: []clientgotesting.UpdateActionImpl{{
Object: Revision("foo", "deploy-replicaset-failure",
WithLogURL, MarkResourcesUnavailable("FailedCreate", "I ReplicaSet failed!"),
withDefaultContainerStatuses(),
WithRoutingState(v1.RoutingStateActive, fc),
MarkContainerHealthyUnknown("Deploying"),
WithRevisionObservedGeneration(1),
MarkActivating("Deploying", ""),
),
}},
Key: "foo/deploy-replicaset-failure",
}, {
Name: "surface replica failure",
// Test the propagation of FailedCreate from Deployment.
6 changes: 6 additions & 0 deletions pkg/testing/v1/revision.go
@@ -152,6 +152,12 @@ func MarkDeploying(reason string) RevisionOption {
}
}

// MarkContainerHealthyUnknown calls the method of the same name on the Revision
// with the given reason.
func MarkContainerHealthyUnknown(reason string) RevisionOption {
return func(r *v1.Revision) {
r.Status.MarkContainerHealthyUnknown(reason, "")
}
}

// MarkProgressDeadlineExceeded calls the method of the same name on the Revision
// with the message we expect the Revision Reconciler to pass.
func MarkProgressDeadlineExceeded(message string) RevisionOption {
29 changes: 15 additions & 14 deletions test/e2e/resource_quota_error_test.go
@@ -40,10 +40,11 @@ func TestResourceQuotaError(t *testing.T) {

clients := test.Setup(t, test.Options{Namespace: "rq-test"})
const (
errorReason = "RevisionFailed"
errorMsgScale = "Initial scale was never achieved"
errorMsgQuota = "forbidden: exceeded quota"
revisionReason = "ProgressDeadlineExceeded"
errorReason = "RevisionFailed"
progressDeadlineReason = "ProgressDeadlineExceeded"
waitReason = "ContainerCreating"
errorMsgQuota = "forbidden: exceeded quota"
revisionReason = "RevisionFailed"
)
names := test.ResourceNames{
Service: test.ObjectNameForTest(t),
@@ -80,16 +81,12 @@
err = v1test.WaitForServiceState(clients.ServingClient, names.Service, func(r *v1.Service) (bool, error) {
cond = r.Status.GetCondition(v1.ServiceConditionConfigurationsReady)
if cond != nil && !cond.IsUnknown() {
// Can fail with either a progress deadline exceeded error or an exceeded resource quota error
if strings.Contains(cond.Message, errorMsgScale) && cond.IsFalse() {
return true, nil
}
if strings.Contains(cond.Message, errorMsgQuota) && cond.IsFalse() {
if cond.Reason == errorReason && cond.IsFalse() {
return true, nil
}
t.Logf("Reason: %s ; Message: %s ; Status: %s", cond.Reason, cond.Message, cond.Status)
return true, fmt.Errorf("the service %s was not marked with expected error condition (Reason=%q, Message=%q, Status=%q), but with (Reason=%q, Message=%q, Status=%q)",
names.Config, errorReason, errorMsgScale, "False", cond.Reason, cond.Message, cond.Status)
names.Config, errorReason, "", "False", cond.Reason, cond.Message, cond.Status)
}
return false, nil
}, "ContainerUnscheduleable")
@@ -113,15 +110,19 @@
err = v1test.CheckRevisionState(clients.ServingClient, revisionName, func(r *v1.Revision) (bool, error) {
cond := r.Status.GetCondition(v1.RevisionConditionReady)
if cond != nil {
// Can fail with either a progress deadline exceeded error or an exceeded resource quota error
if cond.Reason == revisionReason && strings.Contains(cond.Message, errorMsgScale) {
if strings.Contains(cond.Message, errorMsgQuota) && cond.IsFalse() {
return true, nil
}
if strings.Contains(cond.Message, errorMsgQuota) && cond.IsFalse() {
// Can also fail with a progress deadline exceeded error
if cond.Reason == progressDeadlineReason {
return true, nil
}
// wait for the container creation
if cond.Reason == waitReason {
return false, nil
}
return true, fmt.Errorf("the revision %s was not marked with expected error condition (Reason=%q, Message=%q), but with (Reason=%q, Message=%q)",
revisionName, revisionReason, errorMsgScale, cond.Reason, cond.Message)
revisionName, revisionReason, errorMsgQuota, cond.Reason, cond.Message)
}
return false, nil
})
30 changes: 15 additions & 15 deletions third_party/cert-manager-latest/net-certmanager.yaml
@@ -19,7 +19,7 @@ metadata:
name: knative-serving-certmanager
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
serving.knative.dev/controller: "true"
networking.knative.dev/certificate-provider: cert-manager
@@ -52,7 +52,7 @@ metadata:
name: config.webhook.net-certmanager.networking.internal.knative.dev
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
webhooks:
@@ -93,7 +93,7 @@ metadata:
namespace: knative-serving
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager

@@ -119,7 +119,7 @@ metadata:
namespace: knative-serving
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
data:
@@ -178,7 +178,7 @@ metadata:
namespace: knative-serving
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
spec:
@@ -190,15 +190,15 @@ spec:
labels:
app: net-certmanager-controller
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
spec:
serviceAccountName: controller
containers:
- name: controller
# This is the Go import path for the binary that is containerized
# and substituted here.
image: gcr.io/knative-nightly/knative.dev/net-certmanager/cmd/controller@sha256:93daea216a5ad09d597114c7749e0de33745f1941fe31eb7cea6d76e27b02d24
image: gcr.io/knative-nightly/knative.dev/net-certmanager/cmd/controller@sha256:c871cd1202050c852102d33de1b4692a11fc042423995a1e7445d770752e1f1d
resources:
requests:
cpu: 30m
@@ -239,7 +239,7 @@ metadata:
labels:
app: net-certmanager-controller
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
name: net-certmanager-controller
@@ -277,7 +277,7 @@ metadata:
name: selfsigned-cluster-issuer
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
spec:
Expand All @@ -289,7 +289,7 @@ metadata:
name: knative-internal-encryption-issuer
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
spec:
@@ -303,7 +303,7 @@ metadata:
namespace: cert-manager # If you want to use it as a ClusterIssuer the secret must be in the cert-manager namespace.
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
spec:
@@ -338,7 +338,7 @@ metadata:
namespace: knative-serving
labels:
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
spec:
@@ -351,7 +351,7 @@ spec:
labels:
app: net-certmanager-webhook
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
role: net-certmanager-webhook
spec:
@@ -360,7 +360,7 @@ spec:
- name: webhook
# This is the Go import path for the binary that is containerized
# and substituted here.
image: gcr.io/knative-nightly/knative.dev/net-certmanager/cmd/webhook@sha256:7aa5e2077da3f3c4bf026db19a673910b3b016ba9ce0a4482c506e13d461ee7e
image: gcr.io/knative-nightly/knative.dev/net-certmanager/cmd/webhook@sha256:5d3ae5ae850e2448107eb295ec90ee1a4ad102aeb3ded453a94e1c83b4d626b3
resources:
requests:
cpu: 20m
@@ -426,7 +426,7 @@ metadata:
labels:
role: net-certmanager-webhook
app.kubernetes.io/component: net-certmanager
app.kubernetes.io/version: "20231108-11e6219e"
app.kubernetes.io/version: "20231110-8b2a470c"
app.kubernetes.io/name: knative-serving
networking.knative.dev/certificate-provider: cert-manager
spec: