diff --git a/go.mod b/go.mod index 98030cf8..02f9260d 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ go 1.21 require ( github.com/benbjohnson/clock v1.3.5 github.com/davecgh/go-spew v1.1.1 - github.com/ghodss/yaml v1.0.0 github.com/go-logr/logr v1.3.0 github.com/golang/mock v1.6.0 github.com/google/uuid v1.4.0 diff --git a/go.sum b/go.sum index 84dda43d..e7a97c18 100644 --- a/go.sum +++ b/go.sum @@ -685,7 +685,6 @@ github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= github.com/go-fonts/latin-modern v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= diff --git a/pkg/runctl/constants/constants.go b/pkg/runctl/constants/constants.go index 23c5334c..894bdfb9 100644 --- a/pkg/runctl/constants/constants.go +++ b/pkg/runctl/constants/constants.go @@ -5,14 +5,7 @@ import ( ) const ( - - // RunClusterRoleName is the name of the cluster role + // RunClusterRoleName is the name of the cluster role that + // pipeline run service accounts are bound to. RunClusterRoleName k8s.RoleName = "steward-run" - - // JFRStepName is the name of the jfs step - JFRStepName = "step-jenkinsfile-runner" - - // TektonTaskRunName is the name of the Tekton TaskRun in each - // run namespace. 
- TektonTaskRunName = "steward-jenkinsfile-runner" ) diff --git a/pkg/runctl/controller.go b/pkg/runctl/controller.go index b237c845..1dc57ef0 100644 --- a/pkg/runctl/controller.go +++ b/pkg/runctl/controller.go @@ -306,7 +306,7 @@ func (c *Controller) newRunManager(workFactory k8s.ClientFactory, secretProvider return c.testing.newRunManagerStub(workFactory, secretProvider) } - return runmgr.NewRunManager(workFactory, secretProvider) + return runmgr.NewTektonRunManager(workFactory, secretProvider) } func (c *Controller) loadPipelineRunsConfig(ctx context.Context) (*cfg.PipelineRunsConfigStruct, error) { @@ -445,7 +445,7 @@ func (c *Controller) handlePipelineRunFinalizerAndDeletion( if pipelineRun.HasDeletionTimestamp() { logger.V(3).Info("Unfinished pipeline run was deleted") runManager := c.createRunManager(pipelineRun) - err := runManager.Cleanup(ctx, pipelineRun) + err := runManager.DeleteEnv(ctx, pipelineRun) if err != nil { c.eventRecorder.Event(pipelineRun.GetReference(), corev1.EventTypeWarning, api.EventReasonCleaningFailed, err.Error()) return true, err @@ -532,7 +532,7 @@ func (c *Controller) handlePipelineRunPrepare( "failed to load configuration for pipeline runs", ) } - namespace, auxNamespace, err := runManager.Prepare(ctx, pipelineRun, pipelineRunsConfig) + namespace, auxNamespace, err := runManager.CreateEnv(ctx, pipelineRun, pipelineRunsConfig) if err != nil { c.eventRecorder.Event(pipelineRun.GetReference(), corev1.EventTypeWarning, api.EventReasonPreparingFailed, err.Error()) resultClass := serrors.GetClass(err) @@ -641,11 +641,13 @@ func (c *Controller) handlePipelineRunWaiting( return false, nil } -func (c *Controller) startPipelineRun(ctx context.Context, +func (c *Controller) startPipelineRun( + ctx context.Context, runManager run.Manager, pipelineRun k8s.PipelineRun, - pipelineRunsConfig *cfg.PipelineRunsConfigStruct) error { - if err := runManager.Start(ctx, pipelineRun, pipelineRunsConfig); err != nil { + pipelineRunsConfig 
*cfg.PipelineRunsConfigStruct, +) error { + if err := runManager.CreateRun(ctx, pipelineRun, pipelineRunsConfig); err != nil { c.eventRecorder.Event(pipelineRun.GetReference(), corev1.EventTypeWarning, api.EventReasonWaitingFailed, err.Error()) resultClass := serrors.GetClass(err) // In case we have a result we can cleanup. Otherwise we retry in the next iteration. @@ -722,7 +724,7 @@ func (c *Controller) handlePipelineRunCleaning( if pipelineRun.GetStatus().State == api.StateCleaning { logger.V(3).Info("Cleaning up pipeline execution") - err := runManager.Cleanup(ctx, pipelineRun) + err := runManager.DeleteEnv(ctx, pipelineRun) if err != nil { c.eventRecorder.Event(pipelineRun.GetReference(), corev1.EventTypeWarning, api.EventReasonCleaningFailed, err.Error()) return true, err diff --git a/pkg/runctl/controller_test.go b/pkg/runctl/controller_test.go index cf86f1e8..814be564 100644 --- a/pkg/runctl/controller_test.go +++ b/pkg/runctl/controller_test.go @@ -15,7 +15,6 @@ import ( mocks "github.com/SAP/stewardci-core/pkg/k8s/mocks" "github.com/SAP/stewardci-core/pkg/k8s/secrets" cfg "github.com/SAP/stewardci-core/pkg/runctl/cfg" - "github.com/SAP/stewardci-core/pkg/runctl/constants" metricstesting "github.com/SAP/stewardci-core/pkg/runctl/metrics/testing" run "github.com/SAP/stewardci-core/pkg/runctl/run" runmocks "github.com/SAP/stewardci-core/pkg/runctl/run/mocks" @@ -36,7 +35,7 @@ import ( _ "knative.dev/pkg/system/testing" ) -func Test_Controller_meterAllPipelineRunsPeriodic(t *testing.T) { +func Test__Controller_meterAllPipelineRunsPeriodic(t *testing.T) { // no parallel: patching global state // SETUP @@ -71,7 +70,7 @@ func Test_Controller_meterAllPipelineRunsPeriodic(t *testing.T) { c.meterAllPipelineRunsPeriodic() } -func Test_Controller_Success(t *testing.T) { +func Test__Controller__Success(t *testing.T) { t.Parallel() // SETUP @@ -98,7 +97,7 @@ func Test_Controller_Success(t *testing.T) { assert.Equal(t, 2, len(status.StateHistory)) } -func 
Test_Controller_Running(t *testing.T) { +func Test__Controller__Running(t *testing.T) { t.Parallel() for _, containerState := range []string{ @@ -146,14 +145,14 @@ func Test_Controller_Running(t *testing.T) { func stepsWithContainer(state string, startTime metav1.Time) []tekton.StepState { var stepState tekton.StepState time, _ := json.Marshal(startTime) - s := fmt.Sprintf(`{ %q: {"startedAt": %s}, "container": %q, "name": "foo"}`, state, time, constants.JFRStepName) + s := fmt.Sprintf(`{ %q: {"startedAt": %s}, "name": %q}`, state, time, runmgr.JFRTaskRunStepName) json.Unmarshal([]byte(s), &stepState) return []tekton.StepState{ stepState, } } -func Test_Controller_Deletion(t *testing.T) { +func Test__Controller__Deletion(t *testing.T) { t.Parallel() // SETUP @@ -184,7 +183,7 @@ func Test_Controller_Deletion(t *testing.T) { assert.Equal(t, 0, len(run.GetFinalizers())) } -func Test_Controller_syncHandler_PipelineRunNotFound(t *testing.T) { +func Test__Controller_syncHandler__PipelineRunNotFound(t *testing.T) { t.Parallel() // SETUP @@ -241,7 +240,7 @@ func getAPIPipelineRun(cf *fake.ClientFactory, name, namespace string) (*api.Pip return cs.StewardV1alpha1().PipelineRuns(namespace).Get(ctx, name, metav1.GetOptions{}) } -func Test_Controller_syncHandler_deleted_unfinished(t *testing.T) { +func Test__Controller_syncHandler__PipelineRunIsDeletedAndUnfinished(t *testing.T) { for _, currentState := range []api.State{ api.StateUndefined, api.StateNew, @@ -263,7 +262,7 @@ func Test_Controller_syncHandler_deleted_unfinished(t *testing.T) { name: "with_finalizer/cleanup_succeeds", runManagerExpectation: func(rm *runmocks.MockManager) { rm.EXPECT(). - Cleanup(gomock.Any(), gomock.Any()). + DeleteEnv(gomock.Any(), gomock.Any()). Return(nil) }, withFinalizer: true, @@ -276,7 +275,7 @@ func Test_Controller_syncHandler_deleted_unfinished(t *testing.T) { name: "with_finalizer/cleanup_fails", runManagerExpectation: func(rm *runmocks.MockManager) { rm.EXPECT(). 
- Cleanup(gomock.Any(), gomock.Any()). + DeleteEnv(gomock.Any(), gomock.Any()). Return(errors.New("expected")) }, withFinalizer: true, @@ -289,7 +288,7 @@ func Test_Controller_syncHandler_deleted_unfinished(t *testing.T) { name: "no_finalizer/cleanup_succeeds", runManagerExpectation: func(rm *runmocks.MockManager) { rm.EXPECT(). - Cleanup(gomock.Any(), gomock.Any()). + DeleteEnv(gomock.Any(), gomock.Any()). Return(nil) }, withFinalizer: false, @@ -302,7 +301,7 @@ func Test_Controller_syncHandler_deleted_unfinished(t *testing.T) { name: "no_finalizer/cleanup_fails", runManagerExpectation: func(rm *runmocks.MockManager) { rm.EXPECT(). - Cleanup(gomock.Any(), gomock.Any()). + DeleteEnv(gomock.Any(), gomock.Any()). Return(errors.New("expected")) }, withFinalizer: false, @@ -360,7 +359,7 @@ func Test_Controller_syncHandler_deleted_unfinished(t *testing.T) { } } -func Test_Controller_syncHandler_deleted_finished(t *testing.T) { +func Test__Controller_syncHandler__PipelineRunIsDeletedAndFinished(t *testing.T) { t.Parallel() for _, currentResult := range []api.Result{ @@ -419,7 +418,7 @@ func Test_Controller_syncHandler_deleted_finished(t *testing.T) { } } -func Test_Controller_syncHandler_new(t *testing.T) { +func Test__Controller_syncHandler__PipelineRunIsNew(t *testing.T) { error1 := errors.New("error1") errorRecoverable1 := serrors.Recoverable(errors.New("errorRecoverable1")) @@ -442,7 +441,7 @@ func Test_Controller_syncHandler_new(t *testing.T) { pipelineRunSpec: api.PipelineSpec{}, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). - Prepare(gomock.Any(), gomock.Any(), gomock.Any()). + CreateEnv(gomock.Any(), gomock.Any(), gomock.Any()). Return("", "", nil) }, expectedState: api.StateWaiting, @@ -453,7 +452,7 @@ func Test_Controller_syncHandler_new(t *testing.T) { pipelineRunSpec: api.PipelineSpec{}, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). 
- Prepare(gomock.Any(), gomock.Any(), gomock.Any()). + CreateEnv(gomock.Any(), gomock.Any(), gomock.Any()). Return("", "", error1) }, expectedError: error1, @@ -559,7 +558,7 @@ func Test_Controller_syncHandler_new(t *testing.T) { } } -func Test_Controller_syncHandler_unfinished(t *testing.T) { +func Test__Controller_syncHandler__PipelineRunIsUnfinished(t *testing.T) { error1 := errors.New("error1") errorRecoverable1 := serrors.Recoverable(errors.New("errorRecoverable1")) longAgo := metav1.Unix(10, 10) @@ -590,7 +589,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { }, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). - Prepare(gomock.Any(), gomock.Any(), gomock.Any()). + CreateEnv(gomock.Any(), gomock.Any(), gomock.Any()). Return("", "", nil) }, expectedState: api.StateWaiting, @@ -605,7 +604,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { }, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). - Prepare(gomock.Any(), gomock.Any(), gomock.Any()). + CreateEnv(gomock.Any(), gomock.Any(), gomock.Any()). Return("", "", error1) }, expectedError: error1, @@ -623,7 +622,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { }, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). - Prepare(gomock.Any(), gomock.Any(), gomock.Any()). + CreateEnv(gomock.Any(), gomock.Any(), gomock.Any()). Return("", "", serrors.Classify(error1, api.ResultErrorContent)) }, expectedState: api.StateCleaning, @@ -641,7 +640,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { }, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). - Prepare(gomock.Any(), gomock.Any(), gomock.Any()). + CreateEnv(gomock.Any(), gomock.Any(), gomock.Any()). 
Return("", "", serrors.Classify(error1, api.ResultErrorContent)) }, expectedState: api.StateCleaning, @@ -657,7 +656,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { }, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). - Prepare(gomock.Any(), gomock.Any(), gomock.Any()). + CreateEnv(gomock.Any(), gomock.Any(), gomock.Any()). Return("", "", serrors.Classify(error1, api.ResultErrorInfra)) }, expectedState: api.StateCleaning, @@ -789,7 +788,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { GetRun(gomock.Any(), gomock.Any()). Return(nil, nil) rm.EXPECT(). - Start(gomock.Any(), gomock.Any(), gomock.Any()). + CreateRun(gomock.Any(), gomock.Any(), gomock.Any()). Return(nil) }, expectedState: api.StateWaiting, @@ -807,7 +806,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { GetRun(gomock.Any(), gomock.Any()). Return(nil, nil) rm.EXPECT(). - Start(gomock.Any(), gomock.Any(), gomock.Any()). + CreateRun(gomock.Any(), gomock.Any(), gomock.Any()). Return(error1) }, expectedError: error1, @@ -826,7 +825,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { GetRun(gomock.Any(), gomock.Any()). Return(nil, nil) rm.EXPECT(). - Start(gomock.Any(), gomock.Any(), gomock.Any()). + CreateRun(gomock.Any(), gomock.Any(), gomock.Any()). Return(serrors.Classify(error1, api.ResultErrorConfig)) }, expectedState: api.StateCleaning, @@ -1219,7 +1218,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { }, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). - Cleanup(gomock.Any(), gomock.Any()). + DeleteEnv(gomock.Any(), gomock.Any()). Return(nil) }, expectedState: api.StateFinished, @@ -1276,7 +1275,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { }, runManagerExpectation: func(rm *runmocks.MockManager, run *runmocks.MockRun) { rm.EXPECT(). - Cleanup(gomock.Any(), gomock.Any()). + DeleteEnv(gomock.Any(), gomock.Any()). 
Return(nil) }, expectedState: api.StateFinished, @@ -1345,7 +1344,7 @@ func Test_Controller_syncHandler_unfinished(t *testing.T) { } } -func Test_Controller_syncHandler_PipelineRunFetchFails_InternalServerError(t *testing.T) { +func Test__Controller_syncHandler__PipelineRunFetchFails_InternalServerError(t *testing.T) { t.Parallel() // SETUP @@ -1376,7 +1375,7 @@ func Test_Controller_syncHandler_PipelineRunFetchFails_InternalServerError(t *te assert.ErrorContains(t, err, message) } -func Test_Controller_syncHandler_OnTimeout(t *testing.T) { +func Test__Controller_syncHandler__OnTimeout(t *testing.T) { t.Parallel() // SETUP @@ -1459,7 +1458,7 @@ func Test_Controller_syncHandler_OnTimeout(t *testing.T) { } func newTestRunManager(workFactory k8s.ClientFactory, secretProvider secrets.SecretProvider) run.Manager { - return runmgr.NewRunManager(workFactory, secretProvider) + return runmgr.NewTektonRunManager(workFactory, secretProvider) } func startController(t *testing.T, cf *fake.ClientFactory) chan struct{} { @@ -1545,7 +1544,7 @@ func updateRun(t *testing.T, run *api.PipelineRun, namespace string, cf *fake.Cl func getTektonTaskRun(t *testing.T, namespace string, cf *fake.ClientFactory) *tekton.TaskRun { t.Helper() ctx := context.Background() - taskRun, err := cf.TektonV1beta1().TaskRuns(namespace).Get(ctx, constants.TektonTaskRunName, metav1.GetOptions{}) + taskRun, err := cf.TektonV1beta1().TaskRuns(namespace).Get(ctx, runmgr.JFRTaskRunName, metav1.GetOptions{}) if err != nil { t.Fatalf("could not get Tekton task run: %s", err.Error()) } diff --git a/pkg/runctl/run/interfaces.go b/pkg/runctl/run/interfaces.go index 1356e4de..a86830de 100644 --- a/pkg/runctl/run/interfaces.go +++ b/pkg/runctl/run/interfaces.go @@ -12,25 +12,61 @@ import ( // Manager manages runs type Manager interface { - Prepare(ctx context.Context, pipelineRun k8s.PipelineRun, pipelineRunsConfig *cfg.PipelineRunsConfigStruct) (string, string, error) - Start(ctx context.Context, pipelineRun 
k8s.PipelineRun, pipelineRunsConfig *cfg.PipelineRunsConfigStruct) error + // CreateEnv creates a new isolated environment for a new run. + // If an environment exists already, it will be removed first. + CreateEnv(ctx context.Context, pipelineRun k8s.PipelineRun, pipelineRunsConfig *cfg.PipelineRunsConfigStruct) (string, string, error) + + // CreateRun creates a new run in the prepared environment. + // Especially fails if the environment does not exist or a run exists + // already. + CreateRun(ctx context.Context, pipelineRun k8s.PipelineRun, pipelineRunsConfig *cfg.PipelineRunsConfigStruct) error + + // GetRun returns the run or nil if a run has not been created yet. GetRun(ctx context.Context, pipelineRun k8s.PipelineRun) (Run, error) - Cleanup(ctx context.Context, pipelineRun k8s.PipelineRun) error + + // DeleteRun deletes a task run for a given pipeline run. DeleteRun(ctx context.Context, pipelineRun k8s.PipelineRun) error + + // DeleteEnv removes an existing environment. + // If no environment exists, it succeeds. + DeleteEnv(ctx context.Context, pipelineRun k8s.PipelineRun) error } // Run represents a pipeline run type Run interface { + // GetStartTime returns the timestamp when the run actually started. + // Initialization steps should be excluded as far as possible. + // Returns nil if the run has not been started yet. GetStartTime() *metav1.Time - IsRestartable() bool - IsFinished() (bool, steward.Result) + + // GetCompletionTime returns the timestamp of the run's completion. + // Teardown steps should be excluded as far as possible. + // Returns nil if the run has never been started or has not completed yet. GetCompletionTime() *metav1.Time + + // IsFinished returns true if the run is finished. + // Note that a run can be finished without having been started, i.e. + // there was an error. + IsFinished() (bool, steward.Result) + + // IsRestartable returns true if run finished unsuccessfully and can be + // restarted with a possibly successful result. 
+ IsRestartable() bool + + // GetContainerInfo returns the state of the Jenkinsfile Runner container + // as reported in the Tekton TaskRun status. GetContainerInfo() *corev1.ContainerState + + // GetMessage returns the status message. GetMessage() string + + // IsDeleted returns true if the receiver is nil or is marked as deleted. IsDeleted() bool } // SecretManager manages secrets of a pipelinerun type SecretManager interface { + // CopyAll copies all the required secrets of a pipeline run to the + // respective run namespace. CopyAll(ctx context.Context, pipelineRun k8s.PipelineRun) (string, []string, error) } diff --git a/pkg/runctl/run/mocks/mocks.go b/pkg/runctl/run/mocks/mocks.go index 47533b32..36101997 100644 --- a/pkg/runctl/run/mocks/mocks.go +++ b/pkg/runctl/run/mocks/mocks.go @@ -185,18 +185,48 @@ func (m *MockManager) EXPECT() *MockManagerMockRecorder { return m.recorder } -// Cleanup mocks base method. -func (m *MockManager) Cleanup(arg0 context.Context, arg1 k8s.PipelineRun) error { +// CreateEnv mocks base method. +func (m *MockManager) CreateEnv(arg0 context.Context, arg1 k8s.PipelineRun, arg2 *cfg.PipelineRunsConfigStruct) (string, string, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Cleanup", arg0, arg1) + ret := m.ctrl.Call(m, "CreateEnv", arg0, arg1, arg2) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(string) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// CreateEnv indicates an expected call of CreateEnv. +func (mr *MockManagerMockRecorder) CreateEnv(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateEnv", reflect.TypeOf((*MockManager)(nil).CreateEnv), arg0, arg1, arg2) +} + +// CreateRun mocks base method. 
+func (m *MockManager) CreateRun(arg0 context.Context, arg1 k8s.PipelineRun, arg2 *cfg.PipelineRunsConfigStruct) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateRun", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateRun indicates an expected call of CreateRun. +func (mr *MockManagerMockRecorder) CreateRun(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateRun", reflect.TypeOf((*MockManager)(nil).CreateRun), arg0, arg1, arg2) +} + +// DeleteEnv mocks base method. +func (m *MockManager) DeleteEnv(arg0 context.Context, arg1 k8s.PipelineRun) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteEnv", arg0, arg1) ret0, _ := ret[0].(error) return ret0 } -// Cleanup indicates an expected call of Cleanup. -func (mr *MockManagerMockRecorder) Cleanup(arg0, arg1 interface{}) *gomock.Call { +// DeleteEnv indicates an expected call of DeleteEnv. +func (mr *MockManagerMockRecorder) DeleteEnv(arg0, arg1 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Cleanup", reflect.TypeOf((*MockManager)(nil).Cleanup), arg0, arg1) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteEnv", reflect.TypeOf((*MockManager)(nil).DeleteEnv), arg0, arg1) } // DeleteRun mocks base method. @@ -228,36 +258,6 @@ func (mr *MockManagerMockRecorder) GetRun(arg0, arg1 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRun", reflect.TypeOf((*MockManager)(nil).GetRun), arg0, arg1) } -// Prepare mocks base method. -func (m *MockManager) Prepare(arg0 context.Context, arg1 k8s.PipelineRun, arg2 *cfg.PipelineRunsConfigStruct) (string, string, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Prepare", arg0, arg1, arg2) - ret0, _ := ret[0].(string) - ret1, _ := ret[1].(string) - ret2, _ := ret[2].(error) - return ret0, ret1, ret2 -} - -// Prepare indicates an expected call of Prepare. 
-func (mr *MockManagerMockRecorder) Prepare(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Prepare", reflect.TypeOf((*MockManager)(nil).Prepare), arg0, arg1, arg2) -} - -// Start mocks base method. -func (m *MockManager) Start(arg0 context.Context, arg1 k8s.PipelineRun, arg2 *cfg.PipelineRunsConfigStruct) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Start", arg0, arg1, arg2) - ret0, _ := ret[0].(error) - return ret0 -} - -// Start indicates an expected call of Start. -func (mr *MockManagerMockRecorder) Start(arg0, arg1, arg2 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockManager)(nil).Start), arg0, arg1, arg2) -} - // MockSecretManager is a mock of SecretManager interface. type MockSecretManager struct { ctrl *gomock.Controller diff --git a/pkg/runctl/runmgr/constants.go b/pkg/runctl/runmgr/constants.go index 9c358930..401ae02b 100644 --- a/pkg/runctl/runmgr/constants.go +++ b/pkg/runctl/runmgr/constants.go @@ -1,3 +1,27 @@ package runmgr -const jfrResultKey string = "jfr-termination-log" +import steward "github.com/SAP/stewardci-core/pkg/apis/steward" + +const ( + // JFRTaskRunName is the name of the Tekton TaskRun in each + // run namespace. 
+ JFRTaskRunName = "steward-jenkinsfile-runner" + + // JFRTaskRunStepName is the name of the step in the Tekton TaskRun that executes + // the Jenkinsfile Runner + JFRTaskRunStepName = "jenkinsfile-runner" +) + +const ( + runNamespacePrefix = "steward-run" + runNamespaceRandomLength = 5 + serviceAccountName = "default" + serviceAccountTokenName = "steward-serviceaccount-token" + + // in general, the token of the above service account should not be automatically mounted into pods + automountServiceAccountToken = false + + annotationPipelineRunKey = steward.GroupName + "/pipeline-run-key" + + jfrResultKey string = "jfr-termination-log" +) diff --git a/pkg/runctl/runmgr/helpers_for_test.go b/pkg/runctl/runmgr/helpers_for_test.go index 1615d5af..856bd563 100644 --- a/pkg/runctl/runmgr/helpers_for_test.go +++ b/pkg/runctl/runmgr/helpers_for_test.go @@ -14,7 +14,6 @@ import ( k8smocks "github.com/SAP/stewardci-core/pkg/k8s/mocks" secretmocks "github.com/SAP/stewardci-core/pkg/k8s/secrets/mocks" cfg "github.com/SAP/stewardci-core/pkg/runctl/cfg" - "github.com/SAP/stewardci-core/pkg/runctl/constants" runctltesting "github.com/SAP/stewardci-core/pkg/runctl/testing" tektonfakeclient "github.com/SAP/stewardci-core/pkg/tektonclient/clientset/versioned/fake" gomock "github.com/golang/mock/gomock" @@ -152,7 +151,7 @@ func (h *testHelper1) dummyTektonTaskRun() *tektonv1beta1.TaskRun { t.Helper() return &tektonv1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ - Name: constants.TektonTaskRunName, + Name: JFRTaskRunName, Namespace: h.runNamespace1, }, } diff --git a/pkg/runctl/runmgr/run.go b/pkg/runctl/runmgr/run.go index d9ef0059..b6c4197b 100644 --- a/pkg/runctl/runmgr/run.go +++ b/pkg/runctl/runmgr/run.go @@ -2,8 +2,7 @@ package runmgr import ( steward "github.com/SAP/stewardci-core/pkg/apis/steward/v1alpha1" - "github.com/SAP/stewardci-core/pkg/runctl/constants" - run "github.com/SAP/stewardci-core/pkg/runctl/run" + runifc "github.com/SAP/stewardci-core/pkg/runctl/run" tekton 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" termination "github.com/tektoncd/pipeline/pkg/termination" "go.uber.org/zap" @@ -17,37 +16,40 @@ const ( jfrExitCodeErrorConfig = 3 ) +// tektonRun is a runifc.Run based on Tekton. type tektonRun struct { tektonTaskRun *tekton.TaskRun } -// NewRun returns new Run -func NewRun(tektonTaskRun *tekton.TaskRun) run.Run { +// Compiler check for interface compliance +var _ runifc.Run = (*tektonRun)(nil) + +// newRun creates a new tektonRun +func newRun(tektonTaskRun *tekton.TaskRun) *tektonRun { return &tektonRun{tektonTaskRun: tektonTaskRun} } -// GetStartTime returns start time of run if already started -// start time must not be returned if condition is unknown but not running +// GetStartTime implements runifc.Run. func (r *tektonRun) GetStartTime() *metav1.Time { condition := r.getSucceededCondition() if condition == nil { return nil } - if condition.IsUnknown() && condition.Reason != tekton.TaskRunReasonRunning.String() { + if condition.IsUnknown() && condition.Reason != string(tekton.TaskRunReasonRunning) { return nil } - for _, step := range r.tektonTaskRun.Status.Steps { - if step.ContainerName == constants.JFRStepName && step.Running != nil { - return &step.Running.StartedAt + if stepState := r.getJFRStepState(); stepState != nil { + if stepState.Running != nil { + return &stepState.Running.StartedAt } - if step.ContainerName == constants.JFRStepName && step.Terminated != nil { - return &step.Terminated.StartedAt + if stepState.Terminated != nil { + return &stepState.Terminated.StartedAt } } return nil } -// GetCompletionTime returns completion time of run if already completed +// GetCompletionTime implements runifc.Run. 
func (r *tektonRun) GetCompletionTime() *metav1.Time { completionTime := r.tektonTaskRun.Status.CompletionTime if completionTime != nil { @@ -65,10 +67,9 @@ func (r *tektonRun) GetCompletionTime() *metav1.Time { return &now } -// GetContainerInfo returns the state of the Jenkinsfile Runner container -// as reported in the Tekton TaskRun status. +// GetContainerInfo implements runifc.Run. func (r *tektonRun) GetContainerInfo() *corev1.ContainerState { - stepState := r.getJenkinsfileRunnerStepState() + stepState := r.getJFRStepState() if stepState == nil { return nil } @@ -79,7 +80,7 @@ func (r *tektonRun) getSucceededCondition() *knativeapis.Condition { return r.tektonTaskRun.Status.GetCondition(knativeapis.ConditionSucceeded) } -// IsRestartable returns true if run is finished but could be restarted +// IsRestartable implements runifc.Run. func (r *tektonRun) IsRestartable() bool { condition := r.getSucceededCondition() if condition.IsFalse() { @@ -94,7 +95,7 @@ func (r *tektonRun) IsRestartable() bool { return false } -// IsFinished returns true if run is finished +// IsFinished implements runifc.Run. func (r *tektonRun) IsFinished() (bool, steward.Result) { condition := r.getSucceededCondition() if condition.IsUnknown() { @@ -108,7 +109,7 @@ func (r *tektonRun) IsFinished() (bool, steward.Result) { case string(tekton.TaskRunReasonTimedOut): return true, steward.ResultTimeout case string(tekton.TaskRunReasonFailed): - jfrStepState := r.getJenkinsfileRunnerStepState() + jfrStepState := r.getJFRStepState() if jfrStepState != nil && jfrStepState.Terminated != nil { switch jfrStepState.Terminated.ExitCode { case jfrExitCodeErrorContent: @@ -123,7 +124,7 @@ func (r *tektonRun) IsFinished() (bool, steward.Result) { return true, steward.ResultErrorInfra } -// GetMessage returns the termination message +// GetMessage implements runifc.Run. 
func (r *tektonRun) GetMessage() string { var msg string @@ -150,14 +151,15 @@ func (r *tektonRun) GetMessage() string { return "internal error" } +// IsDeleted implements runifc.Run. func (r *tektonRun) IsDeleted() bool { return r == nil || r.tektonTaskRun.DeletionTimestamp != nil } -func (r *tektonRun) getJenkinsfileRunnerStepState() *tekton.StepState { +func (r *tektonRun) getJFRStepState() *tekton.StepState { steps := r.tektonTaskRun.Status.Steps for _, stepState := range steps { - if stepState.Name == tektonTaskJenkinsfileRunnerStep { + if stepState.Name == JFRTaskRunStepName { return &stepState } } diff --git a/pkg/runctl/runmgr/run_manager.go b/pkg/runctl/runmgr/run_manager.go index 8c74fb89..d8cacb98 100644 --- a/pkg/runctl/runmgr/run_manager.go +++ b/pkg/runctl/runmgr/run_manager.go @@ -33,30 +33,18 @@ import ( klog "k8s.io/klog/v2" ) -const ( - runNamespacePrefix = "steward-run" - runNamespaceRandomLength = 5 - serviceAccountName = "default" - serviceAccountTokenName = "steward-serviceaccount-token" - - // in general, the token of the above service account should not be automatically mounted into pods - automountServiceAccountToken = false - - // tektonTaskJenkinsfileRunnerStep is the name of the step - // in the Tekton TaskRun that executes the Jenkinsfile Runner - tektonTaskJenkinsfileRunnerStep = "jenkinsfile-runner" - - annotationPipelineRunKey = steward.GroupName + "/pipeline-run-key" -) - -type runManager struct { +// TektonRunManager is an implementation of runifc.Manager based on Tekton. 
+type TektonRunManager struct { factory k8s.ClientFactory secretProvider secrets.SecretProvider - testing *runManagerTesting + testing *tektonRunManagerTesting } -type runManagerTesting struct { +// Compiler check for interface compliance +var _ runifc.Manager = (*TektonRunManager)(nil) + +type tektonRunManagerTesting struct { cleanupStub func(context.Context, *runContext) error copySecretsToRunNamespaceStub func(context.Context, *runContext) (string, []string, error) createTektonTaskRunStub func(context.Context, *runContext) error @@ -80,16 +68,16 @@ type runContext struct { serviceAccount *k8s.ServiceAccountWrap } -// NewRunManager creates a new runManager. -func NewRunManager(factory k8s.ClientFactory, secretProvider secrets.SecretProvider) runifc.Manager { - return &runManager{ +// NewTektonRunManager creates a new TektonRunManager. +func NewTektonRunManager(factory k8s.ClientFactory, secretProvider secrets.SecretProvider) *TektonRunManager { + return &TektonRunManager{ factory: factory, secretProvider: secretProvider, } } -// Prepare prepares the isolated environment for a new run -func (c *runManager) Prepare(ctx context.Context, pipelineRun k8s.PipelineRun, pipelineRunsConfig *cfg.PipelineRunsConfigStruct) (namespace string, auxNamespace string, err error) { +// CreateEnv implements runifc.Manager. +func (c *TektonRunManager) CreateEnv(ctx context.Context, pipelineRun k8s.PipelineRun, pipelineRunsConfig *cfg.PipelineRunsConfigStruct) (namespace string, auxNamespace string, err error) { runCtx := &runContext{ pipelineRun: pipelineRun, @@ -117,8 +105,8 @@ func (c *runManager) Prepare(ctx context.Context, pipelineRun k8s.PipelineRun, p return runCtx.runNamespace, runCtx.auxNamespace, nil } -// Start starts the run in the environment prepared by Prepare. -func (c *runManager) Start(ctx context.Context, pipelineRun k8s.PipelineRun, pipelineRunsConfig *cfg.PipelineRunsConfigStruct) (err error) { +// CreateRun implements runifc.Manager. 
+func (c *TektonRunManager) CreateRun(ctx context.Context, pipelineRun k8s.PipelineRun, pipelineRunsConfig *cfg.PipelineRunsConfigStruct) (err error) { runCtx := &runContext{ pipelineRun: pipelineRun, @@ -132,7 +120,7 @@ func (c *runManager) Start(ctx context.Context, pipelineRun k8s.PipelineRun, pip // prepareRunNamespace creates a new namespace for the pipeline run // and populates it with needed resources. -func (c *runManager) prepareRunNamespace(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) prepareRunNamespace(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.prepareRunNamespaceStub != nil { return c.testing.prepareRunNamespaceStub(ctx, runCtx) @@ -182,7 +170,7 @@ func (c *runManager) prepareRunNamespace(ctx context.Context, runCtx *runContext return nil } -func (c *runManager) setupServiceAccount(ctx context.Context, runCtx *runContext, pipelineCloneSecretName string, imagePullSecrets []string) error { +func (c *TektonRunManager) setupServiceAccount(ctx context.Context, runCtx *runContext, pipelineCloneSecretName string, imagePullSecrets []string) error { if c.testing != nil && c.testing.setupServiceAccountStub != nil { return c.testing.setupServiceAccountStub(ctx, runCtx, pipelineCloneSecretName, imagePullSecrets) } @@ -214,7 +202,7 @@ func (c *runManager) setupServiceAccount(ctx context.Context, runCtx *runContext return nil } -func (c *runManager) attachAllSecrets(ctx context.Context, runCtx *runContext, accountManager k8s.ServiceAccountManager, pipelineCloneSecretName string, imagePullSecrets []string) (*k8s.ServiceAccountWrap, error) { +func (c *TektonRunManager) attachAllSecrets(ctx context.Context, runCtx *runContext, accountManager k8s.ServiceAccountManager, pipelineCloneSecretName string, imagePullSecrets []string) (*k8s.ServiceAccountWrap, error) { logger := klog.FromContext(ctx) var serviceAccount *k8s.ServiceAccountWrap @@ -247,14 +235,14 @@ func (c *runManager) attachAllSecrets(ctx 
context.Context, runCtx *runContext, a return serviceAccount, nil } -func (c *runManager) copySecretsToRunNamespace(ctx context.Context, runCtx *runContext) (string, []string, error) { +func (c *TektonRunManager) copySecretsToRunNamespace(ctx context.Context, runCtx *runContext) (string, []string, error) { if c.testing != nil && c.testing.copySecretsToRunNamespaceStub != nil { return c.testing.copySecretsToRunNamespaceStub(ctx, runCtx) } return c.getSecretManager(runCtx).CopyAll(ctx, runCtx.pipelineRun) } -func (c *runManager) getSecretManager(runCtx *runContext) runifc.SecretManager { +func (c *TektonRunManager) getSecretManager(runCtx *runContext) runifc.SecretManager { if c.testing != nil && c.testing.getSecretManagerStub != nil { return c.testing.getSecretManagerStub(runCtx) } @@ -263,7 +251,7 @@ func (c *runManager) getSecretManager(runCtx *runContext) runifc.SecretManager { return secretmgr.NewSecretManager(secretHelper) } -func (c *runManager) setupStaticNetworkPolicies(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) setupStaticNetworkPolicies(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.setupStaticNetworkPoliciesStub != nil { return c.testing.setupStaticNetworkPoliciesStub(ctx, runCtx) } @@ -283,7 +271,7 @@ func (c *runManager) setupStaticNetworkPolicies(ctx context.Context, runCtx *run return nil } -func (c *runManager) setupNetworkPolicyThatIsolatesAllPods(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) setupNetworkPolicyThatIsolatesAllPods(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.setupNetworkPolicyThatIsolatesAllPodsStub != nil { return c.testing.setupNetworkPolicyThatIsolatesAllPodsStub(ctx, runCtx) } @@ -312,7 +300,7 @@ func (c *runManager) setupNetworkPolicyThatIsolatesAllPods(ctx context.Context, return nil } -func (c *runManager) setupNetworkPolicyFromConfig(ctx context.Context, runCtx *runContext) error { +func (c 
*TektonRunManager) setupNetworkPolicyFromConfig(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.setupNetworkPolicyFromConfigStub != nil { return c.testing.setupNetworkPolicyFromConfigStub(ctx, runCtx) } @@ -341,7 +329,7 @@ func (c *runManager) setupNetworkPolicyFromConfig(ctx context.Context, runCtx *r return c.createResource(ctx, manifestYAMLStr, "networkpolicies", "network policy", expectedGroupKind, runCtx) } -func (c *runManager) setupStaticLimitRange(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) setupStaticLimitRange(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.setupStaticLimitRangeStub != nil { return c.testing.setupStaticLimitRangeStub(ctx, runCtx) } @@ -356,7 +344,7 @@ func (c *runManager) setupStaticLimitRange(ctx context.Context, runCtx *runConte return nil } -func (c *runManager) setupLimitRangeFromConfig(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) setupLimitRangeFromConfig(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.setupLimitRangeFromConfigStub != nil { return c.testing.setupLimitRangeFromConfigStub(ctx, runCtx) } @@ -374,7 +362,7 @@ func (c *runManager) setupLimitRangeFromConfig(ctx context.Context, runCtx *runC return c.createResource(ctx, configStr, "limitranges", "limit range", expectedGroupKind, runCtx) } -func (c *runManager) setupStaticResourceQuota(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) setupStaticResourceQuota(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.setupStaticResourceQuotaStub != nil { return c.testing.setupStaticResourceQuotaStub(ctx, runCtx) } @@ -389,7 +377,7 @@ func (c *runManager) setupStaticResourceQuota(ctx context.Context, runCtx *runCo return nil } -func (c *runManager) setupResourceQuotaFromConfig(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) 
setupResourceQuotaFromConfig(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.setupResourceQuotaFromConfigStub != nil { return c.testing.setupResourceQuotaFromConfigStub(ctx, runCtx) } @@ -407,7 +395,7 @@ func (c *runManager) setupResourceQuotaFromConfig(ctx context.Context, runCtx *r return c.createResource(ctx, configStr, "resourcequotas", "resource quota", expectedGroupKind, runCtx) } -func (c *runManager) createResource(ctx context.Context, configStr string, resource string, resourceDisplayName string, expectedGroupKind schema.GroupKind, runCtx *runContext) error { +func (c *TektonRunManager) createResource(ctx context.Context, configStr string, resource string, resourceDisplayName string, expectedGroupKind schema.GroupKind, runCtx *runContext) error { var obj *unstructured.Unstructured // decode @@ -462,7 +450,7 @@ func GetPipelineRunKeyAnnotation(object metav1.Object) string { return annotations[annotationPipelineRunKey] } -func (c *runManager) createTektonTaskRunObject(ctx context.Context, runCtx *runContext) (*tekton.TaskRun, error) { +func (c *TektonRunManager) createTektonTaskRunObject(ctx context.Context, runCtx *runContext) (*tekton.TaskRun, error) { var err error @@ -478,7 +466,7 @@ func (c *runManager) createTektonTaskRunObject(ctx context.Context, runCtx *runC automount := true tektonTaskRun := tekton.TaskRun{ ObjectMeta: metav1.ObjectMeta{ - Name: constants.TektonTaskRunName, + Name: JFRTaskRunName, Namespace: namespace, Annotations: map[string]string{ annotationPipelineRunKey: runCtx.pipelineRun.GetKey(), @@ -541,7 +529,7 @@ func getTimeout(runCtx *runContext) *metav1.Duration { return timeout } -func (c *runManager) createTektonTaskRun(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) createTektonTaskRun(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.createTektonTaskRunStub != nil { return c.testing.createTektonTaskRunStub(ctx, runCtx) } @@ -555,7 +543,7 
@@ func (c *runManager) createTektonTaskRun(ctx context.Context, runCtx *runContext return err } -func (c *runManager) addTektonTaskRunParamsForJenkinsfileRunnerImage( +func (c *TektonRunManager) addTektonTaskRunParamsForJenkinsfileRunnerImage( runCtx *runContext, tektonTaskRun *tekton.TaskRun, ) { @@ -581,7 +569,7 @@ func (c *runManager) addTektonTaskRunParamsForJenkinsfileRunnerImage( tektonTaskRun.Spec.Params = append(tektonTaskRun.Spec.Params, params...) } -func (c *runManager) addTektonTaskRunParamsForRunDetails( +func (c *TektonRunManager) addTektonTaskRunParamsForRunDetails( runCtx *runContext, tektonTaskRun *tekton.TaskRun, ) { @@ -602,7 +590,7 @@ func (c *runManager) addTektonTaskRunParamsForRunDetails( } } -func (c *runManager) addTektonTaskRunParamsForPipeline( +func (c *TektonRunManager) addTektonTaskRunParamsForPipeline( runCtx *runContext, tektonTaskRun *tekton.TaskRun, ) error { @@ -629,7 +617,7 @@ func (c *runManager) addTektonTaskRunParamsForPipeline( return nil } -func (c *runManager) addTektonTaskRunParamsForLoggingElasticsearch( +func (c *TektonRunManager) addTektonTaskRunParamsForLoggingElasticsearch( runCtx *runContext, tektonTaskRun *tekton.TaskRun, ) error { @@ -671,7 +659,7 @@ func (c *runManager) addTektonTaskRunParamsForLoggingElasticsearch( return nil } -func (c *runManager) recoverableIfTransient(err error) error { +func (c *TektonRunManager) recoverableIfTransient(err error) error { return serrors.RecoverableIf(err, k8serrors.IsServerTimeout(err) || k8serrors.IsServiceUnavailable(err) || @@ -681,26 +669,26 @@ func (c *runManager) recoverableIfTransient(err error) error { k8serrors.IsUnexpectedServerError(err)) } -// GetRun based on a pipelineRun -func (c *runManager) GetRun(ctx context.Context, pipelineRun k8s.PipelineRun) (runifc.Run, error) { +// GetRun implements runifc.Manager. 
+func (c *TektonRunManager) GetRun(ctx context.Context, pipelineRun k8s.PipelineRun) (runifc.Run, error) { namespace := pipelineRun.GetRunNamespace() - run, err := c.factory.TektonV1beta1().TaskRuns(namespace).Get(ctx, constants.TektonTaskRunName, metav1.GetOptions{}) + run, err := c.factory.TektonV1beta1().TaskRuns(namespace).Get(ctx, JFRTaskRunName, metav1.GetOptions{}) if k8serrors.IsNotFound(err) { return nil, nil } if err != nil { return nil, c.recoverableIfTransient(err) } - return NewRun(run), nil + return newRun(run), nil } -// DeleteRun deletes a JFR task run for a given pipeline run. -func (c *runManager) DeleteRun(ctx context.Context, pipelineRun k8s.PipelineRun) error { +// DeleteRun implements runifc.Manager. +func (c *TektonRunManager) DeleteRun(ctx context.Context, pipelineRun k8s.PipelineRun) error { namespace := pipelineRun.GetRunNamespace() if namespace == "" { return fmt.Errorf("cannot delete taskrun, run namespace not set in %q", pipelineRun.GetName()) } - err := c.factory.TektonV1beta1().TaskRuns(namespace).Delete(ctx, constants.TektonTaskRunName, metav1.DeleteOptions{}) + err := c.factory.TektonV1beta1().TaskRuns(namespace).Delete(ctx, JFRTaskRunName, metav1.DeleteOptions{}) if k8serrors.IsNotFound(err) { return nil @@ -711,8 +699,8 @@ func (c *runManager) DeleteRun(ctx context.Context, pipelineRun k8s.PipelineRun) return nil } -// Cleanup a run based on a pipelineRun -func (c *runManager) Cleanup(ctx context.Context, pipelineRun k8s.PipelineRun) error { +// DeleteEnv implements runifc.Manager. 
+func (c *TektonRunManager) DeleteEnv(ctx context.Context, pipelineRun k8s.PipelineRun) error { runCtx := &runContext{ pipelineRun: pipelineRun, runNamespace: pipelineRun.GetRunNamespace(), @@ -721,7 +709,7 @@ func (c *runManager) Cleanup(ctx context.Context, pipelineRun k8s.PipelineRun) e return c.cleanupNamespaces(ctx, runCtx) } -func (c *runManager) cleanupNamespaces(ctx context.Context, runCtx *runContext) error { +func (c *TektonRunManager) cleanupNamespaces(ctx context.Context, runCtx *runContext) error { if c.testing != nil && c.testing.cleanupStub != nil { return c.testing.cleanupStub(ctx, runCtx) } @@ -760,7 +748,7 @@ func (c *runManager) cleanupNamespaces(ctx context.Context, runCtx *runContext) return fmt.Errorf("cannot delete all namespaces: %s", strings.Join(msg, ", ")) } -func (c *runManager) createNamespace(ctx context.Context, runCtx *runContext, purpose, randName string) (string, error) { +func (c *TektonRunManager) createNamespace(ctx context.Context, runCtx *runContext, purpose, randName string) (string, error) { var err error wanted := &corev1api.Namespace{ @@ -801,7 +789,7 @@ func (c *runManager) createNamespace(ctx context.Context, runCtx *runContext, pu return created.GetName(), err } -func (c *runManager) deleteNamespace(ctx context.Context, name string, options metav1.DeleteOptions) error { +func (c *TektonRunManager) deleteNamespace(ctx context.Context, name string, options metav1.DeleteOptions) error { isIgnorable := func(err error) bool { return k8serrors.IsNotFound(err) || k8serrors.IsGone(err) || diff --git a/pkg/runctl/runmgr/run_manager_test.go b/pkg/runctl/runmgr/run_manager_test.go index a93c3bd9..85acc24a 100644 --- a/pkg/runctl/runmgr/run_manager_test.go +++ b/pkg/runctl/runmgr/run_manager_test.go @@ -10,14 +10,12 @@ import ( featureflag "github.com/SAP/stewardci-core/pkg/featureflag" featureflagtesting "github.com/SAP/stewardci-core/pkg/featureflag/testing" k8s "github.com/SAP/stewardci-core/pkg/k8s" - fake 
"github.com/SAP/stewardci-core/pkg/k8s/fake" k8sfake "github.com/SAP/stewardci-core/pkg/k8s/fake" k8smocks "github.com/SAP/stewardci-core/pkg/k8s/mocks" "github.com/SAP/stewardci-core/pkg/k8s/secrets" secretproviderfakes "github.com/SAP/stewardci-core/pkg/k8s/secrets/providers/fake" k8ssecretprovider "github.com/SAP/stewardci-core/pkg/k8s/secrets/providers/k8s" cfg "github.com/SAP/stewardci-core/pkg/runctl/cfg" - "github.com/SAP/stewardci-core/pkg/runctl/constants" runifc "github.com/SAP/stewardci-core/pkg/runctl/run" runmocks "github.com/SAP/stewardci-core/pkg/runctl/run/mocks" runctltesting "github.com/SAP/stewardci-core/pkg/runctl/testing" @@ -38,8 +36,8 @@ import ( dynamicfake "k8s.io/client-go/dynamic/fake" ) -func newRunManagerTestingWithAllNoopStubs() *runManagerTesting { - return &runManagerTesting{ +func newTektonRunManagerTestingWithAllNoopStubs() *tektonRunManagerTesting { + return &tektonRunManagerTesting{ cleanupStub: func(context.Context, *runContext) error { return nil }, copySecretsToRunNamespaceStub: func(context.Context, *runContext) (string, []string, error) { return "", []string{}, nil }, setupLimitRangeFromConfigStub: func(context.Context, *runContext) error { return nil }, @@ -53,8 +51,8 @@ func newRunManagerTestingWithAllNoopStubs() *runManagerTesting { } } -func newRunManagerTestingWithRequiredStubs() *runManagerTesting { - return &runManagerTesting{} +func newTektonRunManagerTestingWithRequiredStubs() *tektonRunManagerTesting { + return &tektonRunManagerTesting{} } func contextWithSpec(t *testing.T, runNamespaceName string, spec stewardv1alpha1.PipelineSpec) *runContext { @@ -67,7 +65,7 @@ func contextWithSpec(t *testing.T, runNamespaceName string, spec stewardv1alpha1 } } -func Test__runManager_prepareRunNamespace__CreatesNamespaces(t *testing.T) { +func Test__TektonRunManager_prepareRunNamespace__CreatesNamespaces(t *testing.T) { for _, ffEnabled := range []bool{true, false} { t.Run(fmt.Sprintf("featureflag_CreateAuxNamespaceIfUnused_%t", 
ffEnabled), func(t *testing.T) { defer featureflagtesting.WithFeatureFlag(featureflag.CreateAuxNamespaceIfUnused, ffEnabled)() @@ -84,8 +82,8 @@ func Test__runManager_prepareRunNamespace__CreatesNamespaces(t *testing.T) { config := &cfg.PipelineRunsConfigStruct{} secretProvider := secretproviderfakes.NewProvider(h.namespace1) - examinee := NewRunManager(cf, secretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithAllNoopStubs() + examinee := NewTektonRunManager(cf, secretProvider) + examinee.testing = newTektonRunManagerTestingWithAllNoopStubs() pipelineRunHelper, err := k8s.NewPipelineRun(h.ctx, h.getPipelineRunFromStorage(cf, h.namespace1, h.pipelineRun1), cf) assert.NilError(t, err) @@ -121,7 +119,7 @@ func Test__runManager_prepareRunNamespace__CreatesNamespaces(t *testing.T) { } } -func Test__runManager_prepareRunNamespace__Calls__copySecretsToRunNamespace__AndPropagatesError(t *testing.T) { +func Test__TektonRunManager_prepareRunNamespace__Calls__copySecretsToRunNamespace__AndPropagatesError(t *testing.T) { t.Parallel() // SETUP @@ -137,8 +135,8 @@ func Test__runManager_prepareRunNamespace__Calls__copySecretsToRunNamespace__And pipelineRunHelper, err := k8s.NewPipelineRun(h.ctx, h.getPipelineRunFromStorage(cf, h.namespace1, h.pipelineRun1), cf) assert.NilError(t, err) - examinee := NewRunManager(cf, secretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithAllNoopStubs() + examinee := NewTektonRunManager(cf, secretProvider) + examinee.testing = newTektonRunManagerTestingWithAllNoopStubs() expectedError := errors.New("some error") var methodCalled bool @@ -162,7 +160,7 @@ func Test__runManager_prepareRunNamespace__Calls__copySecretsToRunNamespace__And assert.Assert(t, methodCalled == true) } -func Test__runManager_prepareRunNamespace__Calls_setupServiceAccount_AndPropagatesError(t *testing.T) { +func Test__TektonRunManager_prepareRunNamespace__Calls_setupServiceAccount_AndPropagatesError(t *testing.T) { t.Parallel() // SETUP 
@@ -178,8 +176,8 @@ func Test__runManager_prepareRunNamespace__Calls_setupServiceAccount_AndPropagat pipelineRunHelper, err := k8s.NewPipelineRun(h.ctx, h.getPipelineRunFromStorage(cf, h.namespace1, h.pipelineRun1), cf) assert.NilError(t, err) - examinee := NewRunManager(cf, secretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithAllNoopStubs() + examinee := NewTektonRunManager(cf, secretProvider) + examinee.testing = newTektonRunManagerTestingWithAllNoopStubs() expectedPipelineCloneSecretName := "pipelineCloneSecret1" expectedImagePullSecretNames := []string{"imagePullSecret1"} @@ -209,7 +207,7 @@ func Test__runManager_prepareRunNamespace__Calls_setupServiceAccount_AndPropagat assert.Assert(t, methodCalled == true) } -func Test__runManager_prepareRunNamespace__Calls_setupStaticNetworkPolicies_AndPropagatesError(t *testing.T) { +func Test__TektonRunManager_prepareRunNamespace__Calls_setupStaticNetworkPolicies_AndPropagatesError(t *testing.T) { t.Parallel() // SETUP @@ -225,8 +223,8 @@ func Test__runManager_prepareRunNamespace__Calls_setupStaticNetworkPolicies_AndP pipelineRunHelper, err := k8s.NewPipelineRun(h.ctx, h.getPipelineRunFromStorage(cf, h.namespace1, h.pipelineRun1), cf) assert.NilError(t, err) - examinee := NewRunManager(cf, secretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithAllNoopStubs() + examinee := NewTektonRunManager(cf, secretProvider) + examinee.testing = newTektonRunManagerTestingWithAllNoopStubs() expectedError := errors.New("some error") var methodCalled bool @@ -249,14 +247,14 @@ func Test__runManager_prepareRunNamespace__Calls_setupStaticNetworkPolicies_AndP assert.Assert(t, methodCalled == true) } -func Test__runManager_setupStaticNetworkPolicies__Succeeds(t *testing.T) { +func Test__TektonRunManager_setupStaticNetworkPolicies__Succeeds(t *testing.T) { t.Parallel() // SETUP ctx := context.Background() runCtx := &runContext{} - examinee := runManager{ - testing: 
newRunManagerTestingWithAllNoopStubs(), + examinee := TektonRunManager{ + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupStaticNetworkPoliciesStub = nil @@ -267,14 +265,14 @@ func Test__runManager_setupStaticNetworkPolicies__Succeeds(t *testing.T) { assert.NilError(t, resultError) } -func Test__runManager_setupStaticNetworkPolicies__Calls_setupNetworkPolicyThatIsolatesAllPods_AndPropagatesError(t *testing.T) { +func Test__TektonRunManager_setupStaticNetworkPolicies__Calls_setupNetworkPolicyThatIsolatesAllPods_AndPropagatesError(t *testing.T) { t.Parallel() // SETUP h := newTestHelper1(t) runCtx := &runContext{runNamespace: h.namespace1} - examinee := runManager{ - testing: newRunManagerTestingWithAllNoopStubs(), + examinee := TektonRunManager{ + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupStaticNetworkPoliciesStub = nil @@ -295,14 +293,14 @@ func Test__runManager_setupStaticNetworkPolicies__Calls_setupNetworkPolicyThatIs assert.Assert(t, methodCalled == true) } -func Test__runManager_setupStaticNetworkPolicies__Calls_setupNetworkPolicyFromConfig_AndPropagatesError(t *testing.T) { +func Test__TektonRunManager_setupStaticNetworkPolicies__Calls_setupNetworkPolicyFromConfig_AndPropagatesError(t *testing.T) { t.Parallel() // SETUP h := newTestHelper1(t) runCtx := &runContext{runNamespace: h.namespace1} - examinee := runManager{ - testing: newRunManagerTestingWithAllNoopStubs(), + examinee := TektonRunManager{ + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupStaticNetworkPoliciesStub = nil @@ -323,7 +321,7 @@ func Test__runManager_setupStaticNetworkPolicies__Calls_setupNetworkPolicyFromCo assert.Assert(t, methodCalled == true) } -func Test__runManager_setupNetworkPolicyThatIsolatesAllPods(t *testing.T) { +func Test__TektonRunManager_setupNetworkPolicyThatIsolatesAllPods(t *testing.T) { t.Parallel() // SETUP @@ -335,9 +333,9 @@ func 
Test__runManager_setupNetworkPolicyThatIsolatesAllPods(t *testing.T) { cf := k8sfake.NewClientFactory() cf.KubernetesClientset().PrependReactor("create", "*", k8sfake.GenerateNameReactor(0)) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupNetworkPolicyThatIsolatesAllPodsStub = nil @@ -360,7 +358,7 @@ func Test__runManager_setupNetworkPolicyThatIsolatesAllPods(t *testing.T) { } } -func Test__runManager_setupNetworkPolicyFromConfig__NoPolicyConfigured(t *testing.T) { +func Test__TektonRunManager_setupNetworkPolicyFromConfig__NoPolicyConfigured(t *testing.T) { t.Parallel() // SETUP @@ -376,9 +374,9 @@ func Test__runManager_setupNetworkPolicyFromConfig__NoPolicyConfigured(t *testin // the SUT should not use it if no policy is configured. cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupNetworkPolicyFromConfigStub = nil @@ -389,7 +387,7 @@ func Test__runManager_setupNetworkPolicyFromConfig__NoPolicyConfigured(t *testin assert.NilError(t, resultError) } -func Test__runManager_setupNetworkPolicyFromConfig__SetsMetadataAndLeavesOtherThingsUntouched(t *testing.T) { +func Test__TektonRunManager_setupNetworkPolicyFromConfig__SetsMetadataAndLeavesOtherThingsUntouched(t *testing.T) { t.Parallel() // SETUP @@ -439,9 +437,9 @@ func Test__runManager_setupNetworkPolicyFromConfig__SetsMetadataAndLeavesOtherTh ) cf.DynamicClient.PrependReactor("create", "*", k8sfake.GenerateNameReactor(0)) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupNetworkPolicyFromConfigStub = nil @@ -484,7 
+482,7 @@ func Test__runManager_setupNetworkPolicyFromConfig__SetsMetadataAndLeavesOtherTh } } -func Test__runManager_setupNetworkPolicyFromConfig__ReplacesAllMetadata(t *testing.T) { +func Test__TektonRunManager_setupNetworkPolicyFromConfig__ReplacesAllMetadata(t *testing.T) { t.Parallel() // SETUP @@ -536,9 +534,9 @@ func Test__runManager_setupNetworkPolicyFromConfig__ReplacesAllMetadata(t *testi ) cf.DynamicClient.PrependReactor("create", "*", k8sfake.GenerateNameReactor(0)) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupNetworkPolicyFromConfigStub = nil @@ -564,7 +562,7 @@ func Test__runManager_setupNetworkPolicyFromConfig__ReplacesAllMetadata(t *testi } } -func Test_RunManager_setupNetworkPolicyFromConfig_ChooseCorrectPolicy(t *testing.T) { +func Test__TektonRunManager_setupNetworkPolicyFromConfig__ChooseCorrectPolicy(t *testing.T) { t.Parallel() for _, tc := range []struct { @@ -664,9 +662,9 @@ func Test_RunManager_setupNetworkPolicyFromConfig_ChooseCorrectPolicy(t *testing }, } - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupNetworkPolicyFromConfigStub = nil @@ -694,7 +692,7 @@ func Test_RunManager_setupNetworkPolicyFromConfig_ChooseCorrectPolicy(t *testing } } -func Test_RunManager_setupNetworkPolicyFromConfig_MalformedPolicy(t *testing.T) { +func Test__TektonRunManager_setupNetworkPolicyFromConfig__MalformedPolicy(t *testing.T) { t.Parallel() // SETUP @@ -713,9 +711,9 @@ func Test_RunManager_setupNetworkPolicyFromConfig_MalformedPolicy(t *testing.T) // the SUT should not use it if policy decoding fails. 
cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupNetworkPolicyFromConfigStub = nil @@ -726,7 +724,7 @@ func Test_RunManager_setupNetworkPolicyFromConfig_MalformedPolicy(t *testing.T) assert.ErrorContains(t, resultError, "failed to decode configured network policy: ") } -func Test__runManager_setupNetworkPolicyFromConfig__UnexpectedGroup(t *testing.T) { +func Test__TektonRunManager_setupNetworkPolicyFromConfig__UnexpectedGroup(t *testing.T) { t.Parallel() // SETUP @@ -748,9 +746,9 @@ func Test__runManager_setupNetworkPolicyFromConfig__UnexpectedGroup(t *testing.T // the SUT should not use it if policy decoding fails. cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupNetworkPolicyFromConfigStub = nil @@ -764,7 +762,7 @@ func Test__runManager_setupNetworkPolicyFromConfig__UnexpectedGroup(t *testing.T " \"NetworkPolicy.unexpected.group\"") } -func Test__runManager_setupNetworkPolicyFromConfig__UnexpectedKind(t *testing.T) { +func Test__TektonRunManager_setupNetworkPolicyFromConfig__UnexpectedKind(t *testing.T) { t.Parallel() // SETUP @@ -786,9 +784,9 @@ func Test__runManager_setupNetworkPolicyFromConfig__UnexpectedKind(t *testing.T) // the SUT should not use it if policy decoding fails. 
cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupNetworkPolicyFromConfigStub = nil @@ -802,14 +800,14 @@ func Test__runManager_setupNetworkPolicyFromConfig__UnexpectedKind(t *testing.T) " \"UnexpectedKind.networking.k8s.io\"") } -func Test__runManager_setupStaticLimitRange__Calls__setupLimitRangeFromConfig__AndPropagatesError(t *testing.T) { +func Test__TektonRunManager_setupStaticLimitRange__Calls__setupLimitRangeFromConfig__AndPropagatesError(t *testing.T) { t.Parallel() // SETUP h := newTestHelper1(t) runCtx := &runContext{runNamespace: h.namespace1} - examinee := runManager{ - testing: newRunManagerTestingWithAllNoopStubs(), + examinee := TektonRunManager{ + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupStaticLimitRangeStub = nil @@ -830,14 +828,14 @@ func Test__runManager_setupStaticLimitRange__Calls__setupLimitRangeFromConfig__A assert.Assert(t, methodCalled == true) } -func Test__runManager_setupStaticLimitRange__Succeeds(t *testing.T) { +func Test__TektonRunManager_setupStaticLimitRange__Succeeds(t *testing.T) { t.Parallel() // SETUP ctx := context.Background() runCtx := &runContext{} - examinee := runManager{ - testing: newRunManagerTestingWithAllNoopStubs(), + examinee := TektonRunManager{ + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupStaticLimitRangeStub = nil @@ -848,7 +846,7 @@ func Test__runManager_setupStaticLimitRange__Succeeds(t *testing.T) { assert.NilError(t, resultError) } -func Test__runManager_setupLimitRangeFromConfig__NoLimitRangeConfigured(t *testing.T) { +func Test__TektonRunManager_setupLimitRangeFromConfig__NoLimitRangeConfigured(t *testing.T) { t.Parallel() // SETUP @@ -866,9 +864,9 @@ func Test__runManager_setupLimitRangeFromConfig__NoLimitRangeConfigured(t *testi // the 
SUT should not use it if no policy is configured. cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupLimitRangeFromConfigStub = nil @@ -877,7 +875,7 @@ func Test__runManager_setupLimitRangeFromConfig__NoLimitRangeConfigured(t *testi assert.NilError(t, resultError) } -func Test__runManager_setupLimitRangeFromConfig__MalformedLimitRange(t *testing.T) { +func Test__TektonRunManager_setupLimitRangeFromConfig__MalformedLimitRange(t *testing.T) { t.Parallel() // SETUP @@ -895,9 +893,9 @@ func Test__runManager_setupLimitRangeFromConfig__MalformedLimitRange(t *testing. // the SUT should not use it if policy decoding fails. cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupLimitRangeFromConfigStub = nil @@ -908,7 +906,7 @@ func Test__runManager_setupLimitRangeFromConfig__MalformedLimitRange(t *testing. assert.ErrorContains(t, resultError, "failed to decode configured limit range: ") } -func Test__runManager_setupLimitRangeFromConfig__UnexpectedGroup(t *testing.T) { +func Test__TektonRunManager_setupLimitRangeFromConfig__UnexpectedGroup(t *testing.T) { t.Parallel() // SETUP @@ -929,9 +927,9 @@ func Test__runManager_setupLimitRangeFromConfig__UnexpectedGroup(t *testing.T) { // the SUT should not use it if policy decoding fails. 
cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupLimitRangeFromConfigStub = nil @@ -945,7 +943,7 @@ func Test__runManager_setupLimitRangeFromConfig__UnexpectedGroup(t *testing.T) { " \"LimitRange.unexpected.group\"") } -func Test__runManager_setupLimitRangeFromConfig__UnexpectedKind(t *testing.T) { +func Test__TektonRunManager_setupLimitRangeFromConfig__UnexpectedKind(t *testing.T) { t.Parallel() // SETUP @@ -966,9 +964,9 @@ func Test__runManager_setupLimitRangeFromConfig__UnexpectedKind(t *testing.T) { // the SUT should not use it if policy decoding fails. cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupLimitRangeFromConfigStub = nil @@ -982,14 +980,14 @@ func Test__runManager_setupLimitRangeFromConfig__UnexpectedKind(t *testing.T) { " \"UnexpectedKind\"") } -func Test__runManager_setupStaticResourceQuota__Calls__setupResourceQuotaFromConfig__AndPropagatesError(t *testing.T) { +func Test__TektonRunManager_setupStaticResourceQuota__Calls__setupResourceQuotaFromConfig__AndPropagatesError(t *testing.T) { t.Parallel() // SETUP h := newTestHelper1(t) runCtx := &runContext{runNamespace: h.namespace1} - examinee := runManager{ - testing: newRunManagerTestingWithAllNoopStubs(), + examinee := TektonRunManager{ + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupStaticResourceQuotaStub = nil @@ -1010,14 +1008,14 @@ func Test__runManager_setupStaticResourceQuota__Calls__setupResourceQuotaFromCon assert.Assert(t, methodCalled == true) } -func Test__runManager_setupStaticResourceQuota__Succeeds(t *testing.T) { +func 
Test__TektonRunManager_setupStaticResourceQuota__Succeeds(t *testing.T) { t.Parallel() // SETUP ctx := context.Background() runCtx := &runContext{} - examinee := runManager{ - testing: newRunManagerTestingWithAllNoopStubs(), + examinee := TektonRunManager{ + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupStaticResourceQuotaStub = nil @@ -1028,7 +1026,7 @@ func Test__runManager_setupStaticResourceQuota__Succeeds(t *testing.T) { assert.NilError(t, resultError) } -func Test__runManager_setupResourceQuotaFromConfig__NoQuotaConfigured(t *testing.T) { +func Test__TektonRunManager_setupResourceQuotaFromConfig__NoQuotaConfigured(t *testing.T) { t.Parallel() // SETUP @@ -1046,9 +1044,9 @@ func Test__runManager_setupResourceQuotaFromConfig__NoQuotaConfigured(t *testing // the SUT should not use it if no policy is configured. cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupResourceQuotaFromConfigStub = nil @@ -1057,7 +1055,7 @@ func Test__runManager_setupResourceQuotaFromConfig__NoQuotaConfigured(t *testing assert.NilError(t, resultError) } -func Test__runManager_setupResourceQuotaFromConfig__MalformedResourceQuota(t *testing.T) { +func Test__TektonRunManager_setupResourceQuotaFromConfig__MalformedResourceQuota(t *testing.T) { t.Parallel() // SETUP @@ -1075,9 +1073,9 @@ func Test__runManager_setupResourceQuotaFromConfig__MalformedResourceQuota(t *te // the SUT should not use it if policy decoding fails. 
cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupResourceQuotaFromConfigStub = nil @@ -1088,7 +1086,7 @@ func Test__runManager_setupResourceQuotaFromConfig__MalformedResourceQuota(t *te assert.ErrorContains(t, resultError, "failed to decode configured resource quota: ") } -func Test__runManager_setupResourceQuotaFromConfig__UnexpectedGroup(t *testing.T) { +func Test__TektonRunManager_setupResourceQuotaFromConfig__UnexpectedGroup(t *testing.T) { t.Parallel() // SETUP @@ -1109,9 +1107,9 @@ func Test__runManager_setupResourceQuotaFromConfig__UnexpectedGroup(t *testing.T // the SUT should not use it if policy decoding fails. cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupResourceQuotaFromConfigStub = nil @@ -1125,7 +1123,7 @@ func Test__runManager_setupResourceQuotaFromConfig__UnexpectedGroup(t *testing.T " \"ResourceQuota.unexpected.group\"") } -func Test__runManager_setupResourceQuotaFromConfig__UnexpectedKind(t *testing.T) { +func Test__TektonRunManager_setupResourceQuotaFromConfig__UnexpectedKind(t *testing.T) { t.Parallel() // SETUP @@ -1146,9 +1144,9 @@ func Test__runManager_setupResourceQuotaFromConfig__UnexpectedKind(t *testing.T) // the SUT should not use it if policy decoding fails. 
cf := k8smocks.NewMockClientFactory(mockCtrl) - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } examinee.testing.setupResourceQuotaFromConfigStub = nil @@ -1162,7 +1160,7 @@ func Test__runManager_setupResourceQuotaFromConfig__UnexpectedKind(t *testing.T) " \"UnexpectedKind\"") } -func Test__runManager_createTektonTaskRun__PodTemplate_IsNotEmptyIfNoValuesToSet(t *testing.T) { +func Test__TektonRunManager_createTektonTaskRun__PodTemplate_IsNotEmptyIfNoValuesToSet(t *testing.T) { t.Parallel() // SETUP @@ -1178,9 +1176,9 @@ func Test__runManager_createTektonTaskRun__PodTemplate_IsNotEmptyIfNoValuesToSet } mockPipelineRun.UpdateRunNamespace(h.namespace1) cf := k8sfake.NewClientFactory() - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } // EXERCISE @@ -1189,14 +1187,14 @@ func Test__runManager_createTektonTaskRun__PodTemplate_IsNotEmptyIfNoValuesToSet // VERIFY assert.NilError(t, resultError) - taskRun, err := cf.TektonV1beta1().TaskRuns(h.namespace1).Get(h.ctx, constants.TektonTaskRunName, metav1.GetOptions{}) + taskRun, err := cf.TektonV1beta1().TaskRuns(h.namespace1).Get(h.ctx, JFRTaskRunName, metav1.GetOptions{}) assert.NilError(t, err) if equality.Semantic.DeepEqual(taskRun.Spec.PodTemplate, tektonPod.PodTemplate{}) { t.Fatal("podTemplate of TaskRun is empty") } } -func Test__runManager_createTektonTaskRun__PodTemplate_AllValuesSet(t *testing.T) { +func Test__TektonRunManager_createTektonTaskRun__PodTemplate_AllValuesSet(t *testing.T) { t.Parallel() int64Ptr := func(val int64) *int64 { return &val } @@ -1219,9 +1217,9 @@ func Test__runManager_createTektonTaskRun__PodTemplate_AllValuesSet(t *testing.T } cf := k8sfake.NewClientFactory() - examinee := runManager{ + examinee := TektonRunManager{ factory: cf, - 
testing: newRunManagerTestingWithAllNoopStubs(), + testing: newTektonRunManagerTestingWithAllNoopStubs(), } // EXERCISE @@ -1230,7 +1228,7 @@ func Test__runManager_createTektonTaskRun__PodTemplate_AllValuesSet(t *testing.T // VERIFY assert.NilError(t, resultError) - taskRun, err := cf.TektonV1beta1().TaskRuns(h.namespace1).Get(h.ctx, constants.TektonTaskRunName, metav1.GetOptions{}) + taskRun, err := cf.TektonV1beta1().TaskRuns(h.namespace1).Get(h.ctx, JFRTaskRunName, metav1.GetOptions{}) assert.NilError(t, err) automount := true @@ -1250,7 +1248,7 @@ func Test__runManager_createTektonTaskRun__PodTemplate_AllValuesSet(t *testing.T assert.DeepEqual(t, utils.Metav1Duration(4444), taskRun.Spec.Timeout) } -func Test__runManager_addTektonTaskRunParamsForLoggingElasticsearch(t *testing.T) { +func Test__TektonRunManager_addTektonTaskRunParamsForLoggingElasticsearch(t *testing.T) { t.Parallel() const ( TaskRunParamNameIndexURL = "PIPELINE_LOG_ELASTICSEARCH_INDEX_URL" @@ -1330,8 +1328,8 @@ func Test__runManager_addTektonTaskRunParamsForLoggingElasticsearch(t *testing.T defer mockCtrl.Finish() mockFactory, mockPipelineRun, mockSecretProvider := h.prepareMocksWithSpec(mockCtrl, test.spec) - examinee := NewRunManager(mockFactory, mockSecretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithRequiredStubs() + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) + examinee.testing = newTektonRunManagerTestingWithRequiredStubs() runCtx := &runContext{ pipelineRun: mockPipelineRun, @@ -1353,7 +1351,7 @@ func Test__runManager_addTektonTaskRunParamsForLoggingElasticsearch(t *testing.T }) } } -func Test__runManager_Start__CreatesTektonTaskRun(t *testing.T) { +func Test__TektonRunManager_CreateRun__CreatesTektonTaskRun(t *testing.T) { t.Parallel() // SETUP @@ -1364,22 +1362,22 @@ func Test__runManager_Start__CreatesTektonTaskRun(t *testing.T) { h.preparePredefinedClusterRole(mockFactory) config := &cfg.PipelineRunsConfigStruct{} - examinee := 
NewRunManager(mockFactory, mockSecretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithRequiredStubs() + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) + examinee.testing = newTektonRunManagerTestingWithRequiredStubs() // EXERCISE - resultError := examinee.Start(h.ctx, mockPipelineRun, config) + resultError := examinee.CreateRun(h.ctx, mockPipelineRun, config) assert.NilError(t, resultError) // VERIFY runNamespace := mockPipelineRun.GetRunNamespace() result, err := mockFactory.TektonV1beta1().TaskRuns(runNamespace).Get( - h.ctx, constants.TektonTaskRunName, metav1.GetOptions{}) + h.ctx, JFRTaskRunName, metav1.GetOptions{}) assert.NilError(t, err) assert.Assert(t, result != nil) } -func Test__runManager_Prepare__CleanupOnError(t *testing.T) { +func Test__TektonRunManager_CreateEnv__CleanupOnError(t *testing.T) { t.Parallel() prepareRunnamespaceErr := fmt.Errorf("cannot prepare run namespace: foo") @@ -1427,8 +1425,8 @@ func Test__runManager_Prepare__CleanupOnError(t *testing.T) { mockFactory, mockPipelineRun, mockSecretProvider := h.prepareMocks(mockCtrl) config := &cfg.PipelineRunsConfigStruct{} - examinee := NewRunManager(mockFactory, mockSecretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithRequiredStubs() + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) + examinee.testing = newTektonRunManagerTestingWithRequiredStubs() var cleanupCalled int examinee.testing.cleanupStub = func(_ context.Context, ctx *runContext) error { @@ -1444,7 +1442,7 @@ func Test__runManager_Prepare__CleanupOnError(t *testing.T) { } // EXERCISE - _, _, resultError := examinee.Prepare(h.ctx, mockPipelineRun, config) + _, _, resultError := examinee.CreateEnv(h.ctx, mockPipelineRun, config) // VERIFY if test.expectedError != nil { @@ -1455,14 +1453,14 @@ func Test__runManager_Prepare__CleanupOnError(t *testing.T) { } } -func Test__runManager_addTektonTaskRunParamsForJenkinsfileRunnerImage(t *testing.T) { +func 
Test__TektonRunManager_addTektonTaskRunParamsForJenkinsfileRunnerImage(t *testing.T) { t.Parallel() const ( pipelineRunsConfigDefaultImage = "defaultImage1" pipelineRunsConfigDefaultPolicy = "defaultPolicy1" ) - examinee := runManager{} + examinee := TektonRunManager{} for _, tc := range []struct { name string spec *stewardv1alpha1.PipelineSpec @@ -1558,7 +1556,7 @@ func Test__runManager_addTektonTaskRunParamsForJenkinsfileRunnerImage(t *testing } } -func Test__runManager_Prepare__DoesNotSetPipelineRunStatus(t *testing.T) { +func Test__TektonRunManager_CreateEnv__DoesNotSetPipelineRunStatus(t *testing.T) { t.Parallel() // SETUP @@ -1569,11 +1567,11 @@ func Test__runManager_Prepare__DoesNotSetPipelineRunStatus(t *testing.T) { h.preparePredefinedClusterRole(mockFactory) config := &cfg.PipelineRunsConfigStruct{} - examinee := NewRunManager(mockFactory, mockSecretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithRequiredStubs() + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) + examinee.testing = newTektonRunManagerTestingWithRequiredStubs() // EXERCISE - _, _, resultError := examinee.Prepare(h.ctx, mockPipelineRun, config) + _, _, resultError := examinee.CreateEnv(h.ctx, mockPipelineRun, config) assert.NilError(t, resultError) // VERIFY @@ -1581,7 +1579,7 @@ func Test__runManager_Prepare__DoesNotSetPipelineRunStatus(t *testing.T) { mockPipelineRun.EXPECT().UpdateState(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) } -func Test__runManager_Start__DoesNotSetPipelineRunStatus(t *testing.T) { +func Test__TektonRunManager_CreateRun__DoesNotSetPipelineRunStatus(t *testing.T) { t.Parallel() // SETUP @@ -1592,11 +1590,11 @@ func Test__runManager_Start__DoesNotSetPipelineRunStatus(t *testing.T) { h.preparePredefinedClusterRole(mockFactory) config := &cfg.PipelineRunsConfigStruct{} - examinee := NewRunManager(mockFactory, mockSecretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithRequiredStubs() + examinee := 
NewTektonRunManager(mockFactory, mockSecretProvider) + examinee.testing = newTektonRunManagerTestingWithRequiredStubs() // EXERCISE - resultError := examinee.Start(h.ctx, mockPipelineRun, config) + resultError := examinee.CreateRun(h.ctx, mockPipelineRun, config) assert.NilError(t, resultError) // VERIFY @@ -1604,7 +1602,7 @@ func Test__runManager_Start__DoesNotSetPipelineRunStatus(t *testing.T) { mockPipelineRun.EXPECT().UpdateState(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) } -func Test__runManager_copySecretsToRunNamespace__DoesCopySecret(t *testing.T) { +func Test__TektonRunManager_copySecretsToRunNamespace__DoesCopySecret(t *testing.T) { t.Parallel() // SETUP @@ -1612,12 +1610,12 @@ func Test__runManager_copySecretsToRunNamespace__DoesCopySecret(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - examinee := &runManager{} + examinee := &TektonRunManager{} mockSecretManager := runmocks.NewMockSecretManager(mockCtrl) // inject secret manager - examinee.testing = newRunManagerTestingWithRequiredStubs() + examinee.testing = newTektonRunManagerTestingWithRequiredStubs() examinee.testing.getSecretManagerStub = func(*runContext) runifc.SecretManager { return mockSecretManager } @@ -1641,7 +1639,7 @@ func Test__runManager_copySecretsToRunNamespace__DoesCopySecret(t *testing.T) { assert.DeepEqual(t, []string{"foo", "bar"}, imagePullSecrets) } -func Test__runManager_Cleanup__RemovesNamespaces(t *testing.T) { +func Test__TektonRunManager_DeleteEnv__RemovesNamespaces(t *testing.T) { for _, ffEnabled := range []bool{true, false} { t.Run(fmt.Sprintf("featureflag_CreateAuxNamespaceIfUnused_%t", ffEnabled), func(t *testing.T) { defer featureflagtesting.WithFeatureFlag(featureflag.CreateAuxNamespaceIfUnused, ffEnabled)() @@ -1657,8 +1655,8 @@ func Test__runManager_Cleanup__RemovesNamespaces(t *testing.T) { config := &cfg.PipelineRunsConfigStruct{} secretProvider := secretproviderfakes.NewProvider(h.namespace1) - examinee := NewRunManager(cf, 
secretProvider).(*runManager) - examinee.testing = newRunManagerTestingWithAllNoopStubs() + examinee := NewTektonRunManager(cf, secretProvider) + examinee.testing = newTektonRunManagerTestingWithAllNoopStubs() examinee.testing.cleanupStub = nil pipelineRunHelper, err := k8s.NewPipelineRun(h.ctx, h.getPipelineRunFromStorage(cf, h.namespace1, h.pipelineRun1), cf) @@ -1687,7 +1685,7 @@ func Test__runManager_Cleanup__RemovesNamespaces(t *testing.T) { } // EXERCISE - resultErr := examinee.Cleanup(h.ctx, pipelineRunHelper) + resultErr := examinee.DeleteEnv(h.ctx, pipelineRunHelper) // VERIFY assert.NilError(t, resultErr) @@ -1710,7 +1708,7 @@ func dummySecretProvider(factory k8s.ClientFactory, namespace string) secrets.Se return k8ssecretprovider.NewProvider(secretsClient, namespace) } -func Test__runManager__Log_Elasticsearch(t *testing.T) { +func Test__TektonRunManager__Log_Elasticsearch(t *testing.T) { t.Parallel() const ( @@ -1734,7 +1732,7 @@ func Test__runManager__Log_Elasticsearch(t *testing.T) { setupExaminee := func( t *testing.T, pipelineRunJSON string, ) ( - examinee *runManager, runCtx *runContext, cf *k8sfake.ClientFactory, + examinee *TektonRunManager, runCtx *runContext, cf *k8sfake.ClientFactory, ) { pipelineRun := runctltesting.StewardObjectFromJSON(t, pipelineRunJSON).(*stewardv1alpha1.PipelineRun) t.Log("decoded:\n", spew.Sdump(pipelineRun)) @@ -1747,11 +1745,11 @@ func Test__runManager__Log_Elasticsearch(t *testing.T) { k8sPipelineRun, err := k8s.NewPipelineRun(ctx, pipelineRun, cf) assert.NilError(t, err) config := &cfg.PipelineRunsConfigStruct{} - examinee = NewRunManager( + examinee = NewTektonRunManager( cf, dummySecretProvider(cf, pipelineRun.GetNamespace()), - ).(*runManager) - examinee.testing = newRunManagerTestingWithRequiredStubs() + ) + examinee.testing = newTektonRunManagerTestingWithRequiredStubs() runCtx = &runContext{ pipelineRun: k8sPipelineRun, pipelineRunsConfig: config, @@ -2002,7 +2000,7 @@ func 
Test__runManager__Log_Elasticsearch(t *testing.T) { } } -func Test__runManager__getTimeout__retrievesPipelineTimeoutIfSetInThePipelineSpec(t *testing.T) { +func Test__TektonRunManager_getTimeout__retrievesPipelineTimeoutIfSetInThePipelineSpec(t *testing.T) { t.Parallel() //SETUP @@ -2035,7 +2033,7 @@ func Test__runManager__getTimeout__retrievesPipelineTimeoutIfSetInThePipelineSpe } -func Test__runManager__getTimeout__retrievesTheDefaultPipelineTimeoutIfTimeoutIsNilInThePipelineSpec(t *testing.T) { +func Test__TektonRunManager_getTimeout__retrievesTheDefaultPipelineTimeoutIfTimeoutIsNilInThePipelineSpec(t *testing.T) { t.Parallel() //SETUP @@ -2058,7 +2056,7 @@ func Test__runManager__getTimeout__retrievesTheDefaultPipelineTimeoutIfTimeoutIs assert.DeepEqual(t, defaultTimeout, result) } -func Test__runManager_GetRun_Missing(t *testing.T) { +func Test__TektonRunManager_GetRun_Missing(t *testing.T) { t.Parallel() // SETUP @@ -2068,7 +2066,7 @@ func Test__runManager_GetRun_Missing(t *testing.T) { mockFactory, mockPipelineRun, mockSecretProvider := h.prepareMocks(mockCtrl) h.addTektonTaskRun(mockFactory) - examinee := NewRunManager(mockFactory, mockSecretProvider).(*runManager) + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) // EXERCISE run, resultError := examinee.GetRun(h.ctx, mockPipelineRun) @@ -2080,7 +2078,7 @@ func Test__runManager_GetRun_Missing(t *testing.T) { mockPipelineRun.EXPECT().UpdateState(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) } -func Test__runManager_GetRun_Existing(t *testing.T) { +func Test__TektonRunManager_GetRun_Existing(t *testing.T) { t.Parallel() // SETUP @@ -2089,7 +2087,7 @@ func Test__runManager_GetRun_Existing(t *testing.T) { defer mockCtrl.Finish() mockFactory, mockPipelineRun, mockSecretProvider := h.prepareMocks(mockCtrl) - examinee := NewRunManager(mockFactory, mockSecretProvider).(*runManager) + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) // EXERCISE run, resultError := 
examinee.GetRun(h.ctx, mockPipelineRun) @@ -2101,7 +2099,7 @@ func Test__runManager_GetRun_Existing(t *testing.T) { mockPipelineRun.EXPECT().UpdateState(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) } -func Test__runManager_DeleteRun_Success(t *testing.T) { +func Test__TektonRunManager_DeleteRun_Success(t *testing.T) { t.Parallel() // SETUP @@ -2111,7 +2109,7 @@ func Test__runManager_DeleteRun_Success(t *testing.T) { mockFactory, mockPipelineRun, mockSecretProvider := h.prepareMocks(mockCtrl) h.addTektonTaskRun(mockFactory) - examinee := NewRunManager(mockFactory, mockSecretProvider).(*runManager) + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) // EXERCISE resultError := examinee.DeleteRun(h.ctx, mockPipelineRun) @@ -2122,7 +2120,7 @@ func Test__runManager_DeleteRun_Success(t *testing.T) { mockPipelineRun.EXPECT().UpdateState(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) } -func Test__runManager_DeleteRun_Missing(t *testing.T) { +func Test__TektonRunManager_DeleteRun_Missing(t *testing.T) { t.Parallel() // SETUP @@ -2131,7 +2129,7 @@ func Test__runManager_DeleteRun_Missing(t *testing.T) { defer mockCtrl.Finish() mockFactory, mockPipelineRun, mockSecretProvider := h.prepareMocks(mockCtrl) - examinee := NewRunManager(mockFactory, mockSecretProvider) + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) // EXERCISE resultError := examinee.DeleteRun(h.ctx, mockPipelineRun) @@ -2141,7 +2139,7 @@ func Test__runManager_DeleteRun_Missing(t *testing.T) { mockPipelineRun.EXPECT().UpdateState(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) } -func Test__runManager_DeleteRun_MissingRunNamespace(t *testing.T) { +func Test__TektonRunManager_DeleteRun_MissingRunNamespace(t *testing.T) { t.Parallel() // SETUP @@ -2151,7 +2149,7 @@ func Test__runManager_DeleteRun_MissingRunNamespace(t *testing.T) { defer mockCtrl.Finish() mockFactory, mockPipelineRun, mockSecretProvider := h.prepareMocks(mockCtrl) - examinee := NewRunManager(mockFactory, 
mockSecretProvider).(*runManager) + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) mockPipelineRun.EXPECT().GetName().Return("foo").Times(1) @@ -2164,7 +2162,7 @@ func Test__runManager_DeleteRun_MissingRunNamespace(t *testing.T) { mockPipelineRun.EXPECT().UpdateState(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) } -func Test__runManager_DeleteRun_Recoverable(t *testing.T) { +func Test__TektonRunManager_DeleteRun_Recoverable(t *testing.T) { t.Parallel() const ( @@ -2195,7 +2193,7 @@ func Test__runManager_DeleteRun_Recoverable(t *testing.T) { mockFactory, mockPipelineRun, mockSecretProvider := h.prepareMocks(mockCtrl) h.tektonClientset.PrependReactor("delete", "*", k8sfake.NewErrorReactor(tc.transientError)) - examinee := NewRunManager(mockFactory, mockSecretProvider).(*runManager) + examinee := NewTektonRunManager(mockFactory, mockSecretProvider) // EXERCISE resultError := examinee.DeleteRun(h.ctx, mockPipelineRun) @@ -2207,12 +2205,12 @@ func Test__runManager_DeleteRun_Recoverable(t *testing.T) { } } -func newFakeClientFactory(objects ...runtime.Object) *fake.ClientFactory { - cf := fake.NewClientFactory(objects...) +func newFakeClientFactory(objects ...runtime.Object) *k8sfake.ClientFactory { + cf := k8sfake.NewClientFactory(objects...) 
- cf.KubernetesClientset().PrependReactor("create", "*", fake.GenerateNameReactor(0)) + cf.KubernetesClientset().PrependReactor("create", "*", k8sfake.GenerateNameReactor(0)) - cf.StewardClientset().PrependReactor("create", "*", fake.NewCreationTimestampReactor()) + cf.StewardClientset().PrependReactor("create", "*", k8sfake.NewCreationTimestampReactor()) return cf } diff --git a/pkg/runctl/runmgr/run_test.go b/pkg/runctl/runmgr/run_test.go index e5c11c22..b658812a 100644 --- a/pkg/runctl/runmgr/run_test.go +++ b/pkg/runctl/runmgr/run_test.go @@ -7,8 +7,8 @@ import ( "time" api "github.com/SAP/stewardci-core/pkg/apis/steward/v1alpha1" - "github.com/ghodss/yaml" tekton "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + "gotest.tools/v3/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -18,170 +18,291 @@ const ( stepStartTime = `2019-05-14T08:24:11Z` emptyBuild = `{}` - runningBuild = `{"status": - {"steps": [ - {"name": "jenkinsfile-runner", - "running": {"startedAt": "` + stepStartTime + `"}}]}}` - - completedSuccess = `{"status": - {"conditions": [ - {"message": "message1", - "reason": "Succeeded", - "status": "True", - "type": "Succeeded"}], - "steps": [ - {"name": "jenkinsfile-runner", - "terminated": { - "reason": "Completed", - "message": "ok", - "exitCode": 0}}]}}` - - completedErrorInfra = `{"status": - {"conditions": [ - {"message": "message1", - "reason": "Failed", - "status": "False", - "type": "Succeeded"}], - "steps": [ - {"name": "jenkinsfile-runner", - "terminated": { - "reason": "Error", - "message": "ko", - "exitCode": 1}}]}}` - - completedErrorContent = `{"status": - {"conditions": [ - {"message": "message1", - "reason": "Failed", - "status": "False", - "type": "Succeeded"}], - "steps": [ - {"name": "jenkinsfile-runner", - "terminated": { - "reason": "Error", - "message": "ko", - "exitCode": 2}}]}}` - - completedErrorConfig = `{"status": - {"conditions": [ - {"message": "message1", - "reason": "Failed", - "status": "False", - 
"type": "Succeeded"}], - "steps": [ - {"name": "jenkinsfile-runner", - "terminated": { - "reason": "Error", - "message": "ko", - "exitCode": 3}}]}}` - - completedValidationFailed = `{"status": - {"conditions": [ - {"message": "message1", - "reason": "TaskRunValidationFailed", - "status": "False", - "type": "Succeeded"}]}}` + runningBuild = `{ + "status": { + "steps": [ + { + "name": "jenkinsfile-runner", + "running": { + "startedAt": "` + stepStartTime + `" + } + } + ] + } + }` + + completedSuccess = `{ + "status": { + "conditions": [ + { + "message": "message1", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "steps": [ + { + "name": "jenkinsfile-runner", + "terminated": { + "reason": "Completed", + "message": "ok", + "exitCode": 0 + } + } + ] + } + }` + + completedErrorInfra = `{ + "status": { + "conditions": [ + { + "message": "message1", + "reason": "Failed", + "status": "False", + "type": "Succeeded" + } + ], + "steps": [ + { + "name": "jenkinsfile-runner", + "terminated": { + "reason": "Error", + "message": "ko", + "exitCode": 1 + } + } + ] + } + }` + + completedErrorContent = `{ + "status": { + "conditions": [ + { + "message": "message1", + "reason": "Failed", + "status": "False", + "type": "Succeeded" + } + ], + "steps": [ + { + "name": "jenkinsfile-runner", + "terminated": { + "reason": "Error", + "message": "ko", + "exitCode": 2 + } + } + ] + } + }` + + completedErrorConfig = `{ + "status": { + "conditions": [ + { + "message": "message1", + "reason": "Failed", + "status": "False", + "type": "Succeeded" + } + ], + "steps": [ + { + "name": "jenkinsfile-runner", + "terminated": { + "reason": "Error", + "message": "ko", + "exitCode": 3 + } + } + ] + } + }` + + completedValidationFailed = `{ + "status": { + "conditions": [ + { + "message": "message1", + "reason": "TaskRunValidationFailed", + "status": "False", + "type": "Succeeded" + } + ] + } + }` //See issue https://github.com/SAP/stewardci-core/issues/? 
TODO: create public issue. internal: 21 - timeout = `{"status": {"conditions": [{"message": "TaskRun \"steward-jenkinsfile-runner\" failed to finish within \"10m0s\"", "reason": "TaskRunTimeout", "status": "False", "type": "Succeeded"}]}}` - - realStartedBuild = `status: - conditions: - - lastTransitionTime: ` + taskStartTime + ` - message: Not all Steps in the Task have finished executing - reason: Running - status: Unknown - type: Succeeded - podName: build-pod-38aa76 - startTime: - steps: - - container: step-jenkinsfile-runner - imageID: docker-pullable://alpine@sha256:acd3ca9941a85e8ed16515bfc5328e4e2f8c128caa72959a58a127b7801ee01f - name: jenkinsfile-runner - running: - startedAt: "` + stepStartTime + `" -` - - realCompletedSuccess = `status: - completionTime: "2019-05-14T08:24:49Z" - conditions: - - lastTransitionTime: "2019-10-04T13:57:28Z" - message: All Steps have completed executing - reason: Succeeded - status: "True" - type: Succeeded - podName: build-pod-38aa76 - startTime: "2019-05-14T08:24:08Z" - steps: - - container: step-jenkinsfile-runner - imageID: docker-pullable://alpine@sha256:acd3ca9941a85e8ed16515bfc5328e4e2f8c128caa72959a58a127b7801ee01f - name: jenkinsfile-runner - terminated: - containerID: docker://2ee92b9e6971cd76f896c5c4dc403203754bd4aa6c5191541e5f7d8e04ce9326 - exitCode: 0 - finishedAt: "2019-05-14T08:24:49Z" - reason: Completed - startedAt: "2019-05-14T08:24:11Z" -` - - completedMessageSuccess = `status: - completionTime: "2019-05-14T08:24:49Z" - conditions: - - lastTransitionTime: "2019-10-04T13:57:28Z" - message: All Steps have completed executing - reason: Succeeded - status: "True" - type: Succeeded - podName: build-pod-38aa76 - startTime: "2019-05-14T08:24:08Z" - steps: - - container: step-jenkinsfile-runner - imageID: docker-pullable://alpine@sha256:acd3ca9941a85e8ed16515bfc5328e4e2f8c128caa72959a58a127b7801ee01f - name: jenkinsfile-runner - terminated: - containerID: 
docker://2ee92b9e6971cd76f896c5c4dc403203754bd4aa6c5191541e5f7d8e04ce9326 - exitCode: 0 - finishedAt: "2019-05-14T08:24:49Z" - reason: Completed - message: %q - startedAt: "2019-05-14T08:24:11Z" -` - completionTimeSet = `status: - completionTime: 2019-05-14T08:24:49Z - ` - completionTimeNotSet = `status: {}` - - conditionSuccessWithTransitionTime = `status: - conditions: - - lastTransitionTime: "2021-10-07T08:59:59Z" - message: 'foo' - reason: CouldntGetTask - status: "False" - type: Succeeded - ` - conditionSuccessWithoutTransitionTime = `status: - conditions: - - message: 'bar' - reason: CouldntGetTask - status: "False" - type: Succeeded - ` - noSuccessCondition = `status: - conditions: - - lastTransitionTime: "2021-10-07T08:59:59Z" - message: 'baz' - reason: CouldntGetTask - status: "False" - type: Foo - ` - imagePullFailedCondition = `status: - conditions: - - lastTransitionTime: "2022-12-02T12:30:01Z" - message: 'failed to pull the image' - reason: TaskRunImagePullFailed - status: "False" - type: Succeeded -` + timeout = `{ + "status": { + "conditions": [ + { + "message": "TaskRun \"steward-jenkinsfile-runner\" failed to finish within \"10m0s\"", + "reason": "TaskRunTimeout", + "status": "False", + "type": "Succeeded" + } + ] + } + }` + + realStartedBuild = `{ + "status": { + "conditions": [ + { + "lastTransitionTime": "` + taskStartTime + `", + "message": "Not all Steps in the Task have finished executing", + "reason": "Running", + "status": "Unknown", + "type": "Succeeded" + } + ], + "podName": "build-pod-38aa76", + "startTime": null, + "steps": [ + { + "container": "step-jenkinsfile-runner", + "imageID": "docker-pullable://alpine@sha256:acd3ca9941a85e8ed16515bfc5328e4e2f8c128caa72959a58a127b7801ee01f", + "name": "jenkinsfile-runner", + "running": { + "startedAt": "` + stepStartTime + `" + } + } + ] + } + }` + + realCompletedSuccess = `{ + "status": { + "completionTime": "2019-05-14T08:24:49Z", + "conditions": [ + { + "lastTransitionTime": 
"2019-10-04T13:57:28Z", + "message": "All Steps have completed executing", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "podName": "build-pod-38aa76", + "startTime": "2019-05-14T08:24:08Z", + "steps": [ + { + "container": "step-jenkinsfile-runner", + "imageID": "docker-pullable://alpine@sha256:acd3ca9941a85e8ed16515bfc5328e4e2f8c128caa72959a58a127b7801ee01f", + "name": "jenkinsfile-runner", + "terminated": { + "containerID": "docker://2ee92b9e6971cd76f896c5c4dc403203754bd4aa6c5191541e5f7d8e04ce9326", + "exitCode": 0, + "finishedAt": "2019-05-14T08:24:49Z", + "reason": "Completed", + "startedAt": "2019-05-14T08:24:11Z" + } + } + ] + } + }` + + completedMessageSuccess = `{ + "status": { + "completionTime": "2019-05-14T08:24:49Z", + "conditions": [ + { + "lastTransitionTime": "2019-10-04T13:57:28Z", + "message": "All Steps have completed executing", + "reason": "Succeeded", + "status": "True", + "type": "Succeeded" + } + ], + "podName": "build-pod-38aa76", + "startTime": "2019-05-14T08:24:08Z", + "steps": [ + { + "container": "step-jenkinsfile-runner", + "imageID": "docker-pullable://alpine@sha256:acd3ca9941a85e8ed16515bfc5328e4e2f8c128caa72959a58a127b7801ee01f", + "name": "jenkinsfile-runner", + "terminated": { + "containerID": "docker://2ee92b9e6971cd76f896c5c4dc403203754bd4aa6c5191541e5f7d8e04ce9326", + "exitCode": 0, + "finishedAt": "2019-05-14T08:24:49Z", + "reason": "Completed", + "message": %q, + "startedAt": "2019-05-14T08:24:11Z" + } + } + ] + } + }` + + completionTimeSet = `{ + "status": { + "completionTime": "2019-05-14T08:24:49Z" + } + }` + + completionTimeNotSet = `{ + "status": {} + }` + + conditionSuccessWithTransitionTime = `{ + "status": { + "conditions": [ + { + "lastTransitionTime": "2021-10-07T08:59:59Z", + "message": "foo", + "reason": "CouldntGetTask", + "status": "False", + "type": "Succeeded" + } + ] + } + }` + + conditionSuccessWithoutTransitionTime = `{ + "status": { + "conditions": [ + { + "message": "bar", + 
"reason": "CouldntGetTask", + "status": "False", + "type": "Succeeded" + } + ] + } + }` + + noSuccessCondition = `{ + "status": { + "conditions": [ + { + "lastTransitionTime": "2021-10-07T08:59:59Z", + "message": "baz", + "reason": "CouldntGetTask", + "status": "False", + "type": "Foo" + } + ] + } + }` + + imagePullFailedCondition = `{ + "status": { + "conditions": [ + { + "lastTransitionTime": "2022-12-02T12:30:01Z", + "message": "failed to pull the image", + "reason": "TaskRunImagePullFailed", + "status": "False", + "type": "Succeeded" + } + ] + } + }` ) func generateTime(timeRFC3339String string) *metav1.Time { @@ -190,48 +311,64 @@ func generateTime(timeRFC3339String string) *metav1.Time { return &mt } -func fakeTektonTaskRun(s string) *tekton.TaskRun { +func fakeTektonTaskRunFromJSON(s string) *tekton.TaskRun { var result tekton.TaskRun - json.Unmarshal([]byte(s), &result) + if err := json.Unmarshal([]byte(s), &result); err != nil { + panic(err) + } return &result } -func fakeTektonTaskRunYaml(s string) *tekton.TaskRun { - var result tekton.TaskRun - yaml.Unmarshal([]byte(s), &result) - return &result -} +func Test__GetStartTime__UnsetReturnsNil(t *testing.T) { + // SETUP + run := newRun(fakeTektonTaskRunFromJSON(emptyBuild)) -func Test__GetStartTime_UnsetReturnsNil(t *testing.T) { - run := NewRun(fakeTektonTaskRun(emptyBuild)) + // EXERCISE startTime := run.GetStartTime() + + // VERIFY assert.Assert(t, startTime == nil) } -func Test__GetStartTime_Set(t *testing.T) { +func Test__GetStartTime__Set(t *testing.T) { + // SETUP expectedTime := generateTime(stepStartTime) - run := NewRun(fakeTektonTaskRunYaml(realStartedBuild)) + run := newRun(fakeTektonTaskRunFromJSON(realStartedBuild)) + + // EXERCISE startTime := run.GetStartTime() + + // VERIFY assert.Assert(t, expectedTime.Equal(startTime), fmt.Sprintf("Expected: %s, Is: %s", expectedTime, startTime)) } -func Test__IsFinished_RunningUpdatesContainer(t *testing.T) { - run := 
NewRun(fakeTektonTaskRun(runningBuild)) +func Test__IsFinished__RunningUpdatesContainer(t *testing.T) { + // SETUP + run := newRun(fakeTektonTaskRunFromJSON(runningBuild)) + + // EXERCISE finished, _ := run.IsFinished() + + // VERIFY assert.Assert(t, run.GetContainerInfo().Running != nil) assert.Assert(t, finished == false) } -func Test__IsFinished_CompletedSuccess(t *testing.T) { - build := fakeTektonTaskRunYaml(realCompletedSuccess) - run := NewRun(build) +func Test__IsFinished__CompletedSuccess(t *testing.T) { + // SETUP + build := fakeTektonTaskRunFromJSON(realCompletedSuccess) + run := newRun(build) + + // EXERCISE finished, result := run.IsFinished() + + // VERIFY assert.Assert(t, run.GetContainerInfo().Terminated != nil) assert.Assert(t, finished == true) assert.Equal(t, result, api.ResultSuccess) } -func Test__IsFinished_CompletedFail(t *testing.T) { +func Test__IsFinished__CompletedFail(t *testing.T) { for _, test := range []struct { name string trString string @@ -252,10 +389,14 @@ func Test__IsFinished_CompletedFail(t *testing.T) { }, } { t.Run(test.name, func(t *testing.T) { + // SETUP + build := fakeTektonTaskRunFromJSON(test.trString) + run := newRun(build) - build := fakeTektonTaskRun(test.trString) - run := NewRun(build) + // EXERCISE finished, result := run.IsFinished() + + // VERIFY assert.Assert(t, run.GetContainerInfo().Terminated != nil) assert.Assert(t, finished == true) assert.Equal(t, result, test.expectedResult) @@ -263,23 +404,33 @@ func Test__IsFinished_CompletedFail(t *testing.T) { } } -func Test__IsFinished_CompletedValidationFail(t *testing.T) { - build := fakeTektonTaskRun(completedValidationFailed) - run := NewRun(build) +func Test__IsFinished__CompletedValidationFail(t *testing.T) { + // SETUP + build := fakeTektonTaskRunFromJSON(completedValidationFailed) + run := newRun(build) + + // EXERCISE finished, result := run.IsFinished() + + // VERIFY assert.Assert(t, finished == true) assert.Equal(t, result, api.ResultErrorInfra) } -func 
Test__IsFinished_Timeout(t *testing.T) { - run := NewRun(fakeTektonTaskRun(timeout)) +func Test__IsFinished__Timeout(t *testing.T) { + // SETUP + run := newRun(fakeTektonTaskRunFromJSON(timeout)) + + // EXERCISE finished, result := run.IsFinished() + + // VERIFY assert.Assert(t, run.GetContainerInfo() == nil) assert.Assert(t, finished == true) assert.Equal(t, result, api.ResultTimeout) } -func Test__IsRestartable_False(t *testing.T) { +func Test__IsRestartable__False(t *testing.T) { for id, taskrun := range []string{ completedSuccess, completedErrorInfra, @@ -289,16 +440,26 @@ func Test__IsRestartable_False(t *testing.T) { timeout, } { t.Run(fmt.Sprintf("%d", id), func(t *testing.T) { - run := NewRun(fakeTektonTaskRun(taskrun)) + // SETUP + run := newRun(fakeTektonTaskRunFromJSON(taskrun)) + + // EXERCISE result := run.IsRestartable() + + // VERIFY assert.Assert(t, result == false) }) } } -func Test__IsRestartable_ImagePullFailed(t *testing.T) { - run := NewRun(fakeTektonTaskRunYaml(imagePullFailedCondition)) +func Test__IsRestartable__ImagePullFailed(t *testing.T) { + // SETUP + run := newRun(fakeTektonTaskRunFromJSON(imagePullFailedCondition)) + + // EXERCISE result := run.IsRestartable() + + // VERIFY assert.Assert(t, result) } @@ -311,8 +472,13 @@ func Test__GetCompletionTime(t *testing.T) { noSuccessCondition, } { t.Run(fmt.Sprintf("%d", id), func(t *testing.T) { - run := NewRun(fakeTektonTaskRunYaml(taskrun)) + // SETUP + run := newRun(fakeTektonTaskRunFromJSON(taskrun)) + + // EXERCISE completionTime := run.GetCompletionTime() + + // VERIFY assert.Assert(t, completionTime != nil) }) } @@ -346,12 +512,15 @@ func Test__GetMessage(t *testing.T) { }, } { t.Run(test.name, func(t *testing.T) { - test := test - t.Parallel() + // SETUP buildString := fmt.Sprintf(completedMessageSuccess, test.inputMessage) - build := fakeTektonTaskRunYaml(buildString) - run := NewRun(build) + build := fakeTektonTaskRunFromJSON(buildString) + run := newRun(build) + + // EXERCISE 
result := run.GetMessage() + + // VERIFY assert.Equal(t, test.expectedMessage, result) }) } @@ -367,9 +536,9 @@ func Test__IsDeleted__WithReceiverNil(t *testing.T) { func Test__IsDeleted__WithoutTerminationTimestamp(t *testing.T) { // SETUP - taskRun := fakeTektonTaskRun(emptyBuild) + taskRun := fakeTektonTaskRunFromJSON(emptyBuild) taskRun.DeletionTimestamp = nil - run := NewRun(taskRun) + run := newRun(taskRun) // EXERCISE result := run.IsDeleted() @@ -380,9 +549,9 @@ func Test__IsDeleted__WithoutTerminationTimestamp(t *testing.T) { func Test__IsDeleted__WithTerminationTimestamp(t *testing.T) { // SETUP - taskRun := fakeTektonTaskRun(emptyBuild) + taskRun := fakeTektonTaskRunFromJSON(emptyBuild) taskRun.DeletionTimestamp = &metav1.Time{} - run := NewRun(taskRun) + run := newRun(taskRun) // EXERCISE result := run.IsDeleted()