From 49b025492ab65397abe266886e1ab9315f4cb057 Mon Sep 17 00:00:00 2001 From: edp-bot Date: Thu, 18 Jul 2024 14:47:37 +0000 Subject: [PATCH] Update documentation --- 404.html | 2 +- compliance/index.html | 2 +- .../annotations-and-labels/index.html | 4 +- developer-guide/autotest-coverage/index.html | 2 +- .../aws-deployment-diagram/index.html | 2 +- .../index.html | 2 +- .../aws-reference-architecture/index.html | 2 +- developer-guide/edp-workflow/index.html | 4 +- developer-guide/index.html | 2 +- .../kubernetes-deployment/index.html | 2 +- developer-guide/local-development/index.html | 4 +- .../mk-docs-development/index.html | 4 +- .../reference-architecture/index.html | 2 +- .../reference-cicd-pipeline/index.html | 2 +- developer-guide/telemetry/index.html | 4 +- faq/index.html | 4 +- features/index.html | 2 +- glossary/index.html | 2 +- index.html | 4 +- operator-guide/add-ons-overview/index.html | 4 +- .../advanced-installation-overview/index.html | 2 +- operator-guide/argocd-integration/index.html | 4 +- .../artifacts-verification/index.html | 4 +- .../aws-marketplace-install/index.html | 4 +- operator-guide/capsule/index.html | 4 +- .../configure-keycloak-oidc-eks/index.html | 4 +- .../container-registries/index.html | 4 +- .../index.html | 4 +- .../customize_deployment/index.html | 4 +- operator-guide/delete-edp/index.html | 4 +- operator-guide/dependency-track/index.html | 4 +- operator-guide/deploy-aws-eks/index.html | 4 +- operator-guide/deploy-okd-4.10/index.html | 4 +- operator-guide/deploy-okd/index.html | 4 +- operator-guide/ebs-csi-driver/index.html | 4 +- operator-guide/edp-access-model/index.html | 4 +- operator-guide/edp-kiosk-usage/index.html | 2 +- .../eks-oidc-integration/index.html | 4 +- operator-guide/enable-irsa/index.html | 4 +- .../index.html | 4 +- .../github-debug-webhooks/index.html | 4 +- .../gitlab-debug-webhooks/index.html | 4 +- operator-guide/harbor-oidc/index.html | 4 +- operator-guide/headlamp-oidc/index.html | 4 +- .../import-strategy-tekton/index.html | 4 +- operator-guide/index.html | 2 +- operator-guide/install-argocd/index.html | 4 +- operator-guide/install-defectdojo/index.html | 4 +- operator-guide/install-edp/index.html | 4 +- .../index.html | 4 +- operator-guide/install-harbor/index.html | 4 +- .../install-ingress-nginx/index.html | 4 +- operator-guide/install-keycloak/index.html | 4 +- operator-guide/install-kiosk/index.html | 4 +- operator-guide/install-loki/index.html | 4 +- .../install-reportportal/index.html | 4 +- operator-guide/install-tekton/index.html | 4 +- operator-guide/install-velero/index.html | 4 +- operator-guide/install-via-civo/index.html | 4 +- .../install-via-helmfile/index.html | 4 +- .../installation-overview/index.html | 2 +- .../jira-gerrit-integration/index.html | 2 +- operator-guide/jira-integration/index.html | 4 +- operator-guide/kaniko-irsa/index.html | 4 +- operator-guide/kibana-ilm-rollover/index.html | 4 +- .../kubernetes-cluster-settings/index.html | 4 +- .../logsight-integration/index.html | 4 +- operator-guide/loki-irsa/index.html | 4 +- .../manage-custom-certificate/index.html | 4 +- .../index.html | 4 +- operator-guide/multitenant-logging/index.html | 2 +- .../namespace-management/index.html | 4 +- operator-guide/nexus-sonatype/index.html | 4 +- .../notification-msteams/index.html | 4 +- operator-guide/oauth2-proxy/index.html | 4 +- .../openshift-cluster-settings/index.html | 4 +- operator-guide/overview-devsecops/index.html | 2 +- .../overview-multi-tenancy/index.html | 2 +- 
operator-guide/overview-sast/index.html | 2 +- operator-guide/package-registry/index.html | 4 +- operator-guide/prerequisites/index.html | 2 +- .../index.html | 4 +- .../reportportal-keycloak/index.html | 2 +- .../restore-edp-with-velero/index.html | 4 +- .../schedule-pods-restart/index.html | 4 +- .../sonarqube-visibility/index.html | 2 +- operator-guide/sonarqube/index.html | 4 +- operator-guide/ssl-automation-okd/index.html | 4 +- operator-guide/tekton-monitoring/index.html | 4 +- operator-guide/tekton-overview/index.html | 2 +- .../application-not-built/index.html | 2 +- .../invalid-codebase-name/index.html | 4 +- .../troubleshooting/overview/index.html | 2 +- .../resource-observability/index.html | 4 +- .../troubleshoot-applications/index.html | 4 +- .../index.html | 4 +- .../troubleshoot-git-server/index.html | 2 +- .../troubleshoot-stages/index.html | 4 +- operator-guide/upgrade-edp-3.0/index.html | 4 +- operator-guide/upgrade-edp-3.1/index.html | 4 +- operator-guide/upgrade-edp-3.2/index.html | 4 +- operator-guide/upgrade-edp-3.3/index.html | 4 +- operator-guide/upgrade-edp-3.4/index.html | 4 +- operator-guide/upgrade-edp-3.5/index.html | 4 +- operator-guide/upgrade-edp-3.6/index.html | 4 +- operator-guide/upgrade-edp-3.7/index.html | 4 +- operator-guide/upgrade-edp-3.8/index.html | 4 +- operator-guide/upgrade-edp-3.9/index.html | 4 +- .../upgrade-keycloak-19.0/index.html | 4 +- operator-guide/vcs/index.html | 2 +- operator-guide/velero-irsa/index.html | 4 +- .../waf-tf-configuration/index.html | 4 +- overrides/main.html | 5 +- overview/index.html | 2 +- pricing/index.html | 4 +- quick-start/create-application/index.html | 4 +- quick-start/deploy-application/index.html | 2 +- quick-start/integrate-argocd/index.html | 4 +- .../integrate-container-registry/index.html | 2 +- quick-start/integrate-github/index.html | 4 +- quick-start/integrate-sonarcloud/index.html | 2 +- quick-start/platform-installation/index.html | 4 +- quick-start/quick-start-overview/index.html | 4 +- roadmap/index.html | 2 +- sitemap.xml | 320 +++++++++--------- sitemap.xml.gz | Bin 1613 -> 1613 bytes supported-versions/index.html | 4 +- use-cases/application-scaffolding/index.html | 4 +- use-cases/autotest-as-quality-gate/index.html | 2 +- use-cases/external-secrets/index.html | 4 +- use-cases/index.html | 2 +- use-cases/tekton-custom-pipelines/index.html | 4 +- user-guide/add-application/index.html | 2 +- user-guide/add-autotest/index.html | 2 +- user-guide/add-cd-pipeline/index.html | 2 +- user-guide/add-cluster/index.html | 2 +- user-guide/add-git-server/index.html | 2 +- user-guide/add-infrastructure/index.html | 2 +- user-guide/add-library/index.html | 2 +- user-guide/add-marketplace/index.html | 2 +- user-guide/add-quality-gate/index.html | 4 +- user-guide/application/index.html | 2 +- user-guide/autotest/index.html | 2 +- user-guide/build-pipeline/index.html | 4 +- user-guide/cd-pipeline-details/index.html | 2 +- user-guide/ci-pipeline-details/index.html | 2 +- user-guide/cicd-overview/index.html | 2 +- user-guide/cluster/index.html | 4 +- user-guide/code-review-pipeline/index.html | 4 +- user-guide/components/index.html | 2 +- user-guide/configuration-overview/index.html | 2 +- user-guide/git-server-overview/index.html | 4 +- user-guide/gitops/index.html | 4 +- user-guide/index.html | 2 +- user-guide/infrastructure/index.html | 2 +- user-guide/library/index.html | 2 +- user-guide/manage-branches/index.html | 2 +- .../manage-container-registries/index.html | 2 +- user-guide/manage-environments/index.html 
| 2 +- user-guide/marketplace/index.html | 2 +- user-guide/opa-stages/index.html | 2 +- user-guide/prepare-for-release/index.html | 2 +- user-guide/quick-links/index.html | 2 +- user-guide/terraform-stages/index.html | 2 +- 164 files changed, 422 insertions(+), 423 deletions(-) diff --git a/404.html b/404.html index 75c0dff3e..0072a2217 100644 --- a/404.html +++ b/404.html @@ -1 +1 @@ - EPAM Delivery Platform

404 - Not found

\ No newline at end of file diff --git a/compliance/index.html b/compliance/index.html index 46e62643d..2566271f7 100644 --- a/compliance/index.html +++ b/compliance/index.html @@ -1 +1 @@ - Compliance - EPAM Delivery Platform

Compliance⚓︎

The integrity of your deployments is our paramount commitment. We are devoted to strengthening our Kubernetes platform to comply with the most stringent security standards. Trust is the bedrock of our relationships, and we manifest this commitment by undergoing rigorous third-party audits to ensure compliance. We pledge unwavering support as you manage and deploy solutions within your environment, emphasizing security and reliability. Examine our compliance with various frameworks, laws, and regulations to understand our dedication to upholding robust security standards for the solutions you manage and deploy.

the EDP Badge

\ No newline at end of file diff --git a/developer-guide/annotations-and-labels/index.html b/developer-guide/annotations-and-labels/index.html index 80f03b3a5..51d5a815e 100644 --- a/developer-guide/annotations-and-labels/index.html +++ b/developer-guide/annotations-and-labels/index.html @@ -1,4 +1,4 @@ - Annotations and Labels - EPAM Delivery Platform

Annotations and Labels⚓︎

EPAM Delivery Platform uses labels to interact with various resources in a Kubernetes cluster. This guide details the resources, annotations, and labels used by the platform to streamline operations, enhance monitoring, and enforce governance.

Labels⚓︎

The table below contains all the labels used in EDP:

Label Key Target Resources Possible Values Description
app.edp.epam.com/secret-type Secrets jira, nexus, sonar, defectdojo, dependency-track, repository Identifies the type of the secret.
app.edp.epam.com/integration-secret Secrets true Indicates if the secret is used for integration.
app.edp.epam.com/codebase PipelineRun <codebase_name> Identifies the codebase associated with the PipelineRun.
app.edp.epam.com/codebasebranch PipelineRun <codebase_name>-<branch_name> Identifies the codebase branch associated with the PipelineRun.
app.edp.epam.com/pipeline PipelineRun, TaskRun <environment_name> Used by the EDP Portal to display autotest status (on the Deploy environment).
app.edp.epam.com/pipelinetype PipelineRun, TaskRun autotestRunner, build, review, deploy Identifies the type of the Pipeline.
app.edp.epam.com/parentPipelineRun PipelineRun <cd-pipeline-autotest-runner-name> Used by the EDP Portal to display autotest status (on the Deploy environment).
app.edp.epam.com/stage PipelineRun, TaskRun <stage_name> Used by the EDP Portal to display autotest status (on the Deploy environment).
app.edp.epam.com/branch PipelineRun <branch_name> Identifies the branch associated with the PipelineRun.
app.edp.epam.com/codebaseType Codebase system, application Identifies the type of the codebase.
app.edp.epam.com/systemType Codebase gitops Identifies system repositories.
app.edp.epam.com/gitServer Ingress <gitServer_name> Identifies the ingress associated with the GitServer.
app.edp.epam.com/cdpipeline PipelineRun, TaskRun <cdpipeline> Identifies the CD pipeline associated with the PipelineRun.
app.edp.epam.com/cdstage PipelineRun, TaskRun <cd_stage_name> Identifies the CD stage associated with the PipelineRun.
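
Since these are ordinary Kubernetes labels, any of them can serve as a selector. As a minimal sketch, assuming kubectl access to the cluster, a platform namespace named edp, and an illustrative codebase named demo:

    # list all PipelineRuns triggered for the demo codebase
    kubectl get pipelineruns -n edp -l app.edp.epam.com/codebase=demo

    # list all Codebase resources that are applications
    kubectl get codebases -n edp -l app.edp.epam.com/codebaseType=application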

Labels Usage in Secrets⚓︎

The table below shows what labels are used by specific secrets:

Secret Name Labels
ci-argocd app.edp.epam.com/integration-secret=true
app.edp.epam.com/secret-type=argocd
ci-defectdojo app.edp.epam.com/integration-secret=true
app.edp.epam.com/secret-type=defectdojo
ci-dependency-track app.edp.epam.com/integration-secret=true
app.edp.epam.com/secret-type=dependency-track
ci-jira app.edp.epam.com/secret-type=jira
ci-nexus app.edp.epam.com/integration-secret=true
app.edp.epam.com/secret-type=nexus
ci-sonarqube app.edp.epam.com/integration-secret=true
app.edp.epam.com/secret-type=sonar
gerrit-ciuser-sshkey app.edp.epam.com/secret-type=repository
kaniko-docker-config app.edp.epam.com/integration-secret=true
app.edp.epam.com/secret-type=registry
regcred app.edp.epam.com/integration-secret=true
app.edp.epam.com/secret-type=registry
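
As a sketch of how these labels are attached in practice (the namespace edp is an assumption; the secret names and label values come from the table above):

    # find every secret the platform treats as an integration secret
    kubectl get secrets -n edp -l app.edp.epam.com/integration-secret=true

    # attach the expected labels to a manually created Nexus secret
    kubectl label secret ci-nexus -n edp app.edp.epam.com/integration-secret=true app.edp.epam.com/secret-type=nexus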

Labels Usage in Tekton Pipeline Runs⚓︎

The table below displays what labels are used in specific Tekton pipelines:

PipelineRun Labels
review-pipeline app.edp.epam.com/codebase: <codebase_name>
app.edp.epam.com/codebasebranch: <codebase_name>-<branch_name>
app.edp.epam.com/pipelinetype: review
build-pipeline app.edp.epam.com/codebase: <codebase_name>
app.edp.epam.com/codebasebranch: <codebase_name>-<branch_name>
app.edp.epam.com/pipelinetype: build
autotest-runner-pipeline app.edp.epam.com/pipeline: <pipeline_name>
app.edp.epam.com/pipelinetype: autotestRunner
app.edp.epam.com/stage: <stage>
autotest-pipeline app.edp.epam.com/branch: <branch>
app.edp.epam.com/codebase: <codebase_name>
app.edp.epam.com/parentPipelineRun: <cd_pipeline>-<stage>
app.edp.epam.com/pipeline: <cd_pipeline>
app.edp.epam.com/stage: <stage>
deploy app.edp.epam.com/cdpipeline: <cd_pipeline>
app.edp.epam.com/cdstage: <cd_stage_name>
app.edp.epam.com/pipelinetype: deploy
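
To verify which labels a given run actually carries, inspect its metadata directly; the run name and namespace below are illustrative:

    kubectl get pipelinerun gerrit-npm-react-app-build-default-<run-suffix> -n edp -o jsonpath='{.metadata.labels}'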

Pipeline Usage Example⚓︎

To demonstrate label usage in the EDP Tekton pipelines, consider the EDP resource examples below:

Codebase specification
metadata:
   ...
   name: demo
   ...
       qualityGateType: autotests
       stepName: autotest
   ...

The table below shows all the pipelines associated with the demo codebase:

Pipeline Name Type Labels
gerrit-npm-react-app-review Review app.edp.epam.com/codebase: demo
app.edp.epam.com/codebasebranch: demo-main
app.edp.epam.com/pipelinetype: review
gerrit-npm-react-app-build-default Build app.edp.epam.com/codebase: demo
app.edp.epam.com/codebasebranch: demo-main
app.edp.epam.com/pipelinetype: build
autotest-runner Deploy app.edp.epam.com/pipeline: mypipe
app.edp.epam.com/pipelinetype: autotestRunner
app.edp.epam.com/stage: dev
autotests-gradle, autotests-maven Deploy app.edp.epam.com/branch: master
app.edp.epam.com/codebase: autotests
app.edp.epam.com/parentPipelineRun: mypipe-dev-<hash>
app.edp.epam.com/pipeline: mypipe
app.edp.epam.com/stage: dev
deploy Deploy app.edp.epam.com/cdpipeline: deploy
app.edp.epam.com/cdstage: deploy-dev
app.edp.epam.com/pipelinetype: deploy

The list of all the tasks associated with the demo codebase is presented below:

Task Name Labels
init-autotest app.edp.epam.com/pipeline: mypipe
app.edp.epam.com/pipelinetype: autotestRunner
app.edp.epam.com/stage: dev
run-autotest app.edp.epam.com/branch: master
app.edp.epam.com/codebase: autotests
app.edp.epam.com/parentPipelineRun: mypipe-dev-<hash>
app.edp.epam.com/pipeline: mypipe
app.edp.epam.com/stage: dev
wait-for-autotests app.edp.epam.com/pipeline: mypipe
app.edp.epam.com/pipelinetype: autotestRunner
app.edp.epam.com/stage: dev
promote-images app.edp.epam.com/pipeline: mypipe
app.edp.epam.com/pipelinetype: autotestRunner
app.edp.epam.com/stage: dev
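
Using the concrete values from this example, the autotest-related TaskRuns for the dev stage of the mypipe pipeline can be listed with a label selector (the namespace edp is an assumption):

    kubectl get taskruns -n edp -l app.edp.epam.com/pipeline=mypipe,app.edp.epam.com/stage=dev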
\ No newline at end of file diff --git a/developer-guide/autotest-coverage/index.html b/developer-guide/autotest-coverage/index.html index b41015087..fc1a7e16a 100644 --- a/developer-guide/autotest-coverage/index.html +++ b/developer-guide/autotest-coverage/index.html @@ -1 +1 @@ - Quality Control - EPAM Delivery Platform

Quality Control⚓︎

In EPAM Delivery Platform, we guarantee the quality of the product not only by using the most advanced tools and best practices but also by covering the whole product functionality with our dedicated automated tests.

Autotest Coverage Scheme⚓︎

Autotests are a significant part of our verification flow. We continuously improve the quality of our verification mechanisms to provide users with the most stable version of the platform.

The autotest coverage status is presented on the scheme below:

Autotest coverage status

Release Testing⚓︎

In our testing flow, each release is verified by the following tests:

Test Group Description What's Covered
API Tests Tekton Gerrit, GitHub, and GitLab API long regression Codebase provisioning, reviewing and building pipelines, adding new branches, deploying applications (in a custom namespace), Jira integration, and rechecking for review pipeline.
UI Tests Tekton Gerrit, GitHub, and GitLab UI long regression Codebase provisioning, reviewing and building pipelines, adding new branches, deploying applications (in a custom namespace), Jira integration, and rechecking for review pipeline.
Short Tests Tekton Gerrit, GitHub, and GitLab API short regression Codebase provisioning, reviewing and building pipelines, deploying applications (in a custom namespace), and rechecking for review pipeline.
Smoke Tekton Gerrit Smoke Codebase provisioning, reviewing and building pipelines, deploying applications.
\ No newline at end of file diff --git a/developer-guide/aws-deployment-diagram/index.html b/developer-guide/aws-deployment-diagram/index.html index e92b11657..b98a48349 100644 --- a/developer-guide/aws-deployment-diagram/index.html +++ b/developer-guide/aws-deployment-diagram/index.html @@ -1 +1 @@ - EDP Deployment on AWS - EPAM Delivery Platform

EDP Deployment on AWS⚓︎

This document describes the EPAM Delivery Platform (EDP) deployment architecture on AWS. It utilizes various AWS services such as Amazon Elastic Kubernetes Service (EKS), Amazon EC2, Amazon Route 53, and others to build and deploy software in a repeatable, automated way.

Overview⚓︎

The EDP deployment architecture consists of two AWS accounts: Shared and Explorer. The Shared account hosts shared services, while the Explorer account runs the development team workload and EDP services. Both accounts have an AWS EKS cluster deployed in multiple Availability Zones (AZs). The EKS cluster runs the EDP Services, development team workload, and shared services in the case of the Shared account.

EPAM Delivery Platform Deployment Diagram on AWS

Key Components⚓︎

  1. AWS Elastic Kubernetes Service (EKS): A managed Kubernetes service used to run the EDP Services, development team workload, and shared services. EKS provides easy deployment and management of Kubernetes clusters.
  2. Amazon EC2: Instances running within private subnets that serve as nodes for the EKS cluster. Autoscaling Groups are used to deploy these instances, allowing for scalability based on demand.
  3. Amazon Route 53: A DNS web service that manages external and internal DNS records for the EDP deployment. It enables easy access to resources using user-friendly domain names.
  4. AWS Application Load Balancer (ALB): Used for managing ingress traffic into the EDP deployment. Depending on requirements, ALBs can be configured as internal or external load balancers.
  5. AWS WAF: Web Application Firewall service used to protect external ALBs from common web exploits by filtering malicious requests.
  6. AWS Certificate Manager (ACM): A service that provisions, manages, and deploys SSL/TLS certificates for use with AWS services. ACM is used to manage SSL certificates for secure communication within the EDP deployment.
  7. AWS Elastic Container Registry (ECR): A fully managed Docker container registry that stores and manages Docker images. ECR provides a secure and scalable solution for storing container images used in the EDP deployment.
  8. AWS Systems Manager Parameter Store: Used to securely store and manage secrets required by various components of the EDP deployment. Parameter Store protects sensitive information such as API keys, database credentials, and other secrets.
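
As a minimal sketch of how a component might use Parameter Store (the parameter name and value are illustrative, not an EDP convention):

    # store a database credential as an encrypted parameter
    aws ssm put-parameter --name /edp/dev/db-password --type SecureString --value 'example-password'

    # read it back with the value decrypted
    aws ssm get-parameter --name /edp/dev/db-password --with-decryption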

High Availability and Fault Tolerance⚓︎

The EKS cluster is deployed across multiple AZs to ensure high availability and fault tolerance. This allows for automatic failover in case of an AZ outage or instance failure. Autoscaling Groups automatically adjust the number of EC2 instances based on demand, ensuring scalability while maintaining availability.

Design Considerations⚓︎

Reliability⚓︎

  • Using multiple AZs ensures high availability and fault tolerance for the EKS cluster.
  • Autoscaling Groups enable automatic scaling of EC2 instances based on demand, providing reliability during peak loads.
  • Multiple NAT gateways are deployed in each AZ to ensure reliable outbound internet connectivity.

Performance Efficiency⚓︎

  • Utilizing AWS EKS allows for efficient management of Kubernetes clusters without the need for manual configuration or maintenance.
  • Spot instances can be utilized alongside on-demand instances within the EKS cluster to optimize costs while maintaining performance requirements.
  • Amazon Route 53 enables efficient DNS resolution by managing external and internal DNS records.

Security⚓︎

  • External ALBs are protected using AWS WAF, which filters out malicious traffic and protects against common web exploits.
  • ACM is used to provision SSL/TLS certificates, ensuring secure communication within the EDP deployment.
  • Secrets required by various components are securely stored and managed using the AWS Systems Manager Parameter Store.

Cost Optimization⚓︎

  • Utilizing spot and on-demand instances within the EKS cluster can significantly reduce costs while maintaining performance requirements.
  • Autoscaling Groups allow for automatic scaling of EC2 instances based on demand, ensuring optimal resource utilization and cost efficiency.

Conclusion⚓︎

The EPAM Delivery Platform (EDP) deployment architecture on AWS follows best practices and patterns from the Well-Architected Framework. By leveraging AWS services such as EKS, EC2, Route 53, ALB, WAF, ACM, and Parameter Store, the EDP provides a robust and scalable CI/CD system that enables developers to deploy and manage infrastructure and applications quickly. The architecture ensures high availability, fault tolerance, reliability, performance efficiency, security, and cost optimization for the EDP deployment.

\ No newline at end of file diff --git a/developer-guide/aws-infrastructure-cost-estimation/index.html b/developer-guide/aws-infrastructure-cost-estimation/index.html index 6351e2892..9aceaf9dc 100644 --- a/developer-guide/aws-infrastructure-cost-estimation/index.html +++ b/developer-guide/aws-infrastructure-cost-estimation/index.html @@ -1 +1 @@ - AWS Infrastructure Cost Estimation - EPAM Delivery Platform

AWS Infrastructure Cost Estimation⚓︎

Effective planning and budgeting are essential for developing applications in cloud computing, with a key part being accurate infrastructure cost estimation. This not only helps in keeping within budget but also enables informed decision-making and resource optimization for project viability.

This guide aims to offer an in-depth look at the factors affecting AWS infrastructure costs for KubeRocketCI and includes analytics and tools for cost estimation.

Platform Components and Approximate Costs⚓︎

This section contains tables outlining the key components of our AWS infrastructure, including a brief description of each component's role, its purpose within our infrastructure, and an estimate of its monthly cost.

Note

The costs mentioned below are estimates. For the most accurate and up-to-date pricing, please refer to the AWS official documentation.

The table below outlines key AWS infrastructure components for KubeRocketCI, detailing each component's role and purpose; itemized monthly cost estimates follow in the second table:

Component Description Purpose Within Infrastructure
Application Load Balancer (ALB) Distributes incoming application traffic across multiple targets. Ensures high availability and fault tolerance for our applications.
Virtual Private Cloud (VPC) Provides an isolated section of the AWS cloud where resources can be launched. Segregates our infrastructure for enhanced security and management.
3x Network Address Translation (NAT) Gateways Enables instances in a private subnet to connect to the internet or other AWS services. Provides internet access to EC2 instances without exposing them to the public internet.
Elastic Container Registry (ECR) A fully managed container registry. Stores, manages, and deploys container images.
Elastic Kubernetes Service (EKS) A managed Kubernetes service. Simplifies running Kubernetes applications on AWS.
Elastic Block Store (EBS) Provides persistent block storage volumes for use with EC2 instances. Offers highly available and durable storage for our applications.
Elastic Compute Cloud (EC2) Provides scalable computing capacity. Hosts our applications, supporting varied compute workloads.

The table below presents an itemized estimate of monthly costs for KubeRocketCI's AWS infrastructure components, including ALB, VPC, EC2, and more:

Component Approximate Monthly Cost
Application Load Balancer (ALB) $30.00
Virtual Private Cloud (VPC):
- 3x Network Address Translation (NAT) Gateways $113.88
- 3x Public IPv4 Addresses $10.95
Elastic Container Registry (ECR) $5.00
Elastic Kubernetes Service (EKS):
- 1x EKS Cluster $73.00
Elastic Block Store (EBS) $14.28
Elastic Compute Cloud (EC2):
- 2x c5.2xlarge (Spot) $219.11
- 2x c5.2xlarge (On-Demand) $576.00

AWS Pricing Calculator⚓︎

To further assist in your planning and budgeting efforts, we have pre-configured the AWS Pricing Calculator with inputs matching our infrastructure setup. This tool allows you to explore and adjust the cost estimation based on your specific needs, giving you a personalized overview of potential expenses.

Access the AWS Pricing Calculator with our pre-configured setup here: AWS Pricing Calculator

\ No newline at end of file diff --git a/developer-guide/aws-reference-architecture/index.html b/developer-guide/aws-reference-architecture/index.html index 5bb66c00e..371ebb479 100644 --- a/developer-guide/aws-reference-architecture/index.html +++ b/developer-guide/aws-reference-architecture/index.html @@ -1 +1 @@ - EDP Reference Architecture on AWS - EPAM Delivery Platform

EDP Reference Architecture on AWS⚓︎

The reference architecture of the EPAM Delivery Platform (EDP) on AWS is designed to provide a robust and scalable CI/CD system for developing and deploying software in a repeatable and automated manner. The architecture leverages AWS Managed Services to enable developers to quickly deploy and manage infrastructure and applications. EDP recommends following the best practices and patterns from the Well-Architected Framework, the AWS Architecture Center, and the EKS Best Practices Guide.

Architecture Details⚓︎

The AWS Cloud comprises three accounts: Production, Shared, and Development.

Note

AWS Account management is out of scope for this document.

Each account serves specific purposes:

  • The Production account is used to host production workloads. The Production account serves as the final destination for deploying business applications. It maintains a separate ECR registry to store Docker images for production-level applications. The environment is designed to be highly resilient and scalable, leveraging the EPAM Delivery Platform's CI/CD pipeline to ensure consistent and automated deployments. With proper access control and separation from development environments, the Production account provides a stable and secure environment for running mission-critical applications.
  • The Development account is dedicated to development workload and lower environments. This account hosts the EDP itself, running on AWS EKS. It provides developers an isolated environment to build, test, and deploy their applications in lower environments, ensuring separation from production workloads. Developers can connect to the AWS Cloud using a VPN, enforcing secure access.
  • The Shared account holds shared services that are accessible to all accounts within the organization. These services include SonarQube, Nexus, and Keycloak, which are deployed in Kubernetes clusters managed by AWS Elastic Kubernetes Service (EKS). The shared services leverage AWS RDS, AWS EFS, and AWS ALB/NLB. The deployment of the shared services is automated using the Kubernetes cluster-addons approach with GitOps and Argo CD.

EPAM Delivery Platform Reference Architecture on AWS

Infrastructure as Code⚓︎

Infrastructure as Code (IaC) is a key principle in the EPAM Delivery Platform architecture. Terraform is the IaC tool to provision and manage all services in each account. AWS S3 and AWS DynamoDB serve as the backend for Terraform state, ensuring consistency and reliability in the deployment process. This approach enables the architecture to be version-controlled and allows for easy replication and reproducibility of environments.
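
For illustration, Terraform can be pointed at such a backend during initialization; the bucket, key, and lock-table names below are assumptions:

    terraform init \
      -backend-config="bucket=edp-terraform-state" \
      -backend-config="key=shared/terraform.tfstate" \
      -backend-config="region=eu-central-1" \
      -backend-config="dynamodb_table=edp-terraform-lock"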

Container Registry⚓︎

The architecture utilizes AWS Elastic Container Registry (ECR) as a Docker Registry for container image management. ECR offers a secure, scalable, and reliable solution for storing and managing container images. It integrates seamlessly with other AWS services and provides a highly available and durable storage solution for containers in the CI/CD pipeline.
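
For example, a CI step or workstation typically authenticates Docker to ECR before pushing an image; the account ID and region below are placeholders:

    aws ecr get-login-password --region eu-central-1 | docker login --username AWS --password-stdin 123456789012.dkr.ecr.eu-central-1.amazonaws.com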

IAM Roles for Service Accounts (IRSA)⚓︎

The EPAM Delivery Platform implements IAM Roles for Service Accounts (IRSA) to provide secure access to AWS services from Kubernetes Clusters. This feature enables fine-grained access control with individual Kubernetes pods assuming specific IAM roles for authenticated access to AWS resources. IRSA eliminates the need for managing and distributing access keys within the cluster, significantly enhancing security and reducing operational complexity.
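
In practice, enabling IRSA for a workload amounts to annotating its service account with the IAM role to assume; the namespace, service account, and role ARN below are hypothetical:

    kubectl annotate serviceaccount kaniko -n edp eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/edp-kaniko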

SSL Certificates⚓︎

The architecture uses AWS Certificate Manager (ACM) to provide the SSL/TLS certificates that secure communication between services. ACM eliminates the need to manually manage SSL/TLS certificates, automating the renewal and deployment process. By leveraging ACM, the EDP ensures secure and encrypted traffic within its environment.

AWS WAF⚓︎

The architecture's external Application Load Balancer (ALB) endpoint is protected by the AWS Web Application Firewall (WAF). WAF protects against common web exploits and ensures the security and availability of the applications hosted within the EDP. It offers regular rule updates and easy integration with other AWS services.

Parameter Store and Secrets Manager⚓︎

The architecture leverages the AWS Systems Manager Parameter Store and Secrets Manager to securely store and manage all secrets and parameters utilized within the EKS clusters. Parameter Store holds general configuration information, such as database connection strings and API keys, while Secrets Manager securely stores sensitive information, such as passwords and access tokens. By centralizing secrets management, the architecture ensures proper access control and reduces the risk of unauthorized access.

Observability and Monitoring⚓︎

For observability and monitoring, the EDP leverages a suite of AWS Managed Services designed to provide comprehensive insights into the performance and health of applications and infrastructure:

AWS CloudWatch is utilized for monitoring and observability, offering detailed insights into application and infrastructure performance. It enables real-time monitoring of logs, metrics, and events, facilitating proactive issue resolution and performance optimization.

AWS OpenSearch Service (successor to Amazon Elasticsearch Service) provides powerful search and analytics capabilities. It allows for the analysis of log data and metrics, supporting enhanced application monitoring and user experience optimization.

AWS Managed Grafana offers a scalable, secure, and fully managed Grafana service, enabling developers to create and share dashboards for visualizing real-time data.

AWS Prometheus Service, a managed Prometheus-compatible monitoring service, is used for monitoring Kubernetes and container environments. It supports powerful queries and provides detailed insights into container and microservices architectures.

Summary⚓︎

The reference architecture of the EPAM Delivery Platform on AWS provides a comprehensive and scalable environment for building and deploying software applications. With a strong focus on automation, security, and best practices, this architecture enables developers to leverage the full potential of AWS services while following industry-standard DevOps practices.

\ No newline at end of file diff --git a/developer-guide/edp-workflow/index.html b/developer-guide/edp-workflow/index.html index 2d102c9d4..27e55ecfc 100644 --- a/developer-guide/edp-workflow/index.html +++ b/developer-guide/edp-workflow/index.html @@ -1,4 +1,4 @@ - KubeRocketCI Project Rules. Working Process - EPAM Delivery Platform

KubeRocketCI Project Rules. Working Process⚓︎

This page contains the details on the project rules and working process for the KubeRocketCI team and contributors. Explore the main points about working with GitHub and following the main commit flow, as well as the details about commit types and messages below.

Project Rules⚓︎

Before starting the development, please check the project rules:

  1. It is highly recommended to become familiar with the GitHub flow. For details, please refer to the GitHub official documentation and pay attention to the main points:

    a. Creating pull requests in GitHub.

    b. Resolution of Merge Conflict.

    c. Comments resolution.

    d. One GitHub task should have one Pull Request (PR) if it doesn't change multiple operators. If there are many changes within one PR, amend the commit.

  2. Only the Assignee is responsible for the PR merger and Jira task status.

  3. Every PR should be merged in a timely manner.

  4. Log time to Jira ticket.

Working Process⚓︎

With KubeRocketCI, the main workflow is based on getting a Jira task and creating a Pull Request according to the rules described below.

Workflow

Get Jira task → implement, verify the results yourself → create Pull Request (PR) → send for review → resolve comments/add changes, ask colleagues for the final review → track the PR merge → verify the results yourself → change the status in the Jira ticket to CODE COMPLETE or RESOLVED → share necessary links with a QA specialist in the QA Verification channel → QA specialist closes the Jira task after their verification → Jira task should be CLOSED.

Commit Flow

  1. Get a task in the Jira/GitHub dashboard. Please be aware of the following points:

    a. Every task has a reporter who can provide more details in case something is not clear.

    b. The responsible person for the task and code implementation is the assignee who tracks the following:

    • Actual Jira task status.
    • Time logging.
    • Add comments, attach necessary files.
    • In comments, add link that refers to the merged PR (optional, if not related to many repositories).
    • Code review and the final merge.
    • MS Teams chats - ping other colleagues, answer questions, etc.
    • Verification by a QA specialist.
    • Bug fixing.

    c. Pay attention to the task Status, which differs between entity types; the workflow below helps to see the whole task processing:

    View Jira workflow

    d. There are several entities that are used on the KubeRocketCI project: Story, Improvement, Task, Bug.

    For GitHub tasks:

    a. Every task has a reporter who can provide more details in case something is not clear.

    b. The responsible person for the task and code implementation is the assignee who tracks the following:

    • Actual GitHub task status.
    • Add comments, attach necessary files.
    • In comments, add a link that refers to the merged PR (optional, if not related to many repositories).
    • Code review and the final merge.
    • MS Teams chats - ping other colleagues, answer questions, etc.
    • Verification by a QA specialist.
    • Bug fixing.

    c. If the task is created on your own, make sure it is populated completely. See an example below:

    GitHub issue

  2. Implement the feature, improvement, or fix, and check the results on your own. If it is impossible to check the results of your work before the merge, verify them afterwards.

  3. When committing, use the pattern: commit type: Commit message (#GitHub ticket number).

    a. commit type:

    feat: (new feature for the user, not a new feature for build script)

    fix: (bug fix for the user, not a fix to a build script)

    docs: (changes to the documentation)

    style: (formatting, missing semicolons, etc.; no production code change)

    refactor: (refactoring production code, e.g., renaming a variable)

    test: (adding missing tests, refactoring tests; no production code change)

    chore: (updating grunt tasks, etc.; no production code change)

    !: (added to other commit types to mark breaking changes) For example:

    feat!: Add ingress links column into Applications table on stage page (#77)

    BREAKING CHANGE: Ingress links column has been added into the Applications table on the stage details page

    b. Commit message:

    • brief, for example:

      fix: Remove secretKey duplication from registry secrets (#63)

      or

    • descriptive, for example:

      feat: Provide the ability to configure hadolint check (#88)

      * Add configuration files .hadolint.yaml and .hadolint.yml to stash

      Note

      It is mandatory to start a commit message with a capital letter.

    c. GitHub tickets are typically identified using a number preceded by the # sign and enclosed in parentheses (see the command-line sketch after this list).

    Note

    Make sure there is a descriptive commit message for a breaking change Pull Request. For example:

    feat!: Add ingress links column into Applications table on stage page (#77)

    BREAKING CHANGE: Ingress links column has been added into the Applications table on the stage details page

  4. Create a Pull Request; for details, please refer to the Code Review Process:

    GitHub issue

    Note

    If a Pull Request contains both new functionality and breaking changes, make sure the functionality description is placed before the breaking changes. For example:

    feat!: Update Gerrit to improve access

    • Implement Developers group creation process
    • Align group permissions

    BREAKING CHANGES: Update Gerrit config according to groups
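
For illustration, committing with the pattern from step 3 might look like this on the command line (the messages reuse the examples above):

  # Brief message following the "commit type: Commit message (#ticket number)" pattern:
  git commit -m "fix: Remove secretKey duplication from registry secrets (#63)"

  # A breaking change adds "!" after the commit type and a BREAKING CHANGE footer,
  # passed here as a second -m paragraph:
  git commit -m "feat!: Add ingress links column into Applications table on stage page (#77)" \
             -m "BREAKING CHANGE: Ingress links column has been added into the Applications table on the stage details page"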


Overview⚓︎

The EPAM Delivery Platform (EDP) Developer Guide serves as a comprehensive technical resource specifically designed for developers. It offers detailed insights into expanding the functionalities of EDP. This section focuses on explaining the development approach and fundamental architectural blueprints that form the basis of the platform's ecosystem.

Within these pages, you'll find architectural diagrams, component schemas, and deployment strategies essential for grasping the structural elements of EDP. These technical illustrations serve as references, providing a detailed understanding of component interactions and deployment methodologies. Understanding the architecture of EDP and integrating third-party solutions into its established framework enables the creation of efficient, scalable, and customizable solutions within the EPAM Delivery Platform.

The diagram below illustrates how GitHub repositories and Docker registries are interconnected within the EDP ecosystem.

Diagram: Core components and their repositories: codebase-operator, cd-pipeline-operator, EDP Portal (edp-headlamp), nexus-operator, sonar-operator, keycloak-operator, edp-tekton, edp-install.

Release Channels⚓︎

As a publicly available product, the EPAM Delivery Platform relies on various channels to share information, gather feedback, and distribute new releases effectively. This section outlines the diverse channels through which users can engage with our platform and stay informed about the latest developments and enhancements.

Marketplaces⚓︎

Our product is presented on AWS and Civo marketplaces. It's essential to ensure that the product information on these platforms is up-to-date and accurately reflects the latest version of our software:

OperatorHub⚓︎

Our product operators are showcased on OperatorHub, enabling seamless integration and management capabilities:

GitHub Repositories⚓︎

Our platform components, optional enhancements, add-ons, and deployment resources are hosted on GitHub repositories. Explore the following repositories to access the source code of components.

Platform Components⚓︎

Each platform component is available in its corresponding GitHub project:

Optional Components⚓︎

These optional components enhance the platform's installation and configuration experience:

Add-ons Repository⚓︎

The Add-ons repository provides a streamlined pathway for deploying the all-in-one solution:

Tekton Custom Library⚓︎

Explore additional tools and customizations in our Tekton Custom Library:

Platform Test Data⚓︎

Access test data from the 'Create' onboarding strategy:

Helm Charts⚓︎

Helm chart artifacts are available in the repository:

DockerHub⚓︎

Our DockerHub repository hosts Docker images for various platform components:

Social Media⚓︎

To maintain an active presence on social media channels and share valuable content about our software releases, we continuously publish materials across the following media:


Kubernetes Deployment⚓︎

This section provides a comprehensive overview of the EDP deployment approach on a Kubernetes cluster. EDP is designed and functions based on a set of key guiding principles:

  • Operator Pattern Approach: The operator pattern is used for deployment and configuration, ensuring that the platform aligns with Kubernetes-native methodologies (see the schema below).
  • Loose Coupling: EDP comprises several loosely coupled operators responsible for different platform parts. These operators can be deployed independently, enabling the most straightforward platform customization and delivery approach.

    Kubernetes Operator

The following deployment diagram illustrates the platform's core components, which provide the minimum functional capabilities required for the platform operation: build, push, deploy, and run applications. The platform relies on several mandatory dependencies:

  • Ingress: An ingress controller responsible for routing traffic to the platform.
  • Tekton Stack: Includes Tekton pipelines, triggers, dashboard, chains, etc.
  • ArgoCD: Responsible for GitOps deployment.

EPAM Delivery Platform Deployment Diagram

  • Codebase Operator: Responsible for managing git repositories, versioning, and branching. It also implements the Jira integration controller.
  • CD Pipeline Operator: Manages Continuous Delivery (CD) pipelines and CD stages (an abstraction over a Kubernetes Namespace). The operator acts as the bridge between the artifact and deployment tools, like Argo CD. It defines the CD pipeline structure and artifact promotion logic, and triggers the pipeline execution.
  • Tekton Pipelines: Manages Tekton pipelines and processes events (EventListener, Interceptor) from Version Control Systems. The pipelines are integrated with external tools like SonarQube, Nexus, etc.
  • EDP Portal: This is the User Interface (UI) component, built on top of Headlamp.

Business applications are deployed on the platform using the CD Pipeline Operator and Argo CD. By default, the CD Pipeline Operator uses Argo CD as a deployment tool. However, it can be replaced with any other tool, like FluxCD, Spinnaker, etc. By default, the target environment for application deployment is the Kubernetes cluster where EDP is deployed, but it can be any other Kubernetes cluster.
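
As a quick orientation, the deployed operators and the custom resources they manage can be inspected with kubectl (the edp namespace and the resource names below are assumptions based on the components described above; adjust them to your installation):

  # List the platform operators running in the EDP namespace:
  kubectl get deployments -n edp

  # Inspect the custom resources managed by the operators:
  kubectl get codebases,codebasebranches -n edp
  kubectl get cdpipelines,stages -n edp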


Operator Development⚓︎

This page is intended for developers and shares the details on how to set up the local environment and start coding in Go for the EPAM Delivery Platform.

Prerequisites⚓︎

  • Git is installed;
  • One of our repositories where you would like to contribute is cloned locally;
  • Local Kubernetes cluster (Kind is recommended) is installed;
  • Helm is installed;
  • Any IDE (GoLand is used here as an example) is installed;
  • GoLang stable version is installed.

Note

Make sure GOPATH and GOROOT environment variables are added in PATH.
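
For example, on Linux or macOS the variables can be derived from the Go toolchain itself and added to the shell profile (a minimal sketch; adjust it to your shell and installation):

  # Derive the paths from the installed Go toolchain:
  export GOROOT="$(go env GOROOT)"
  export GOPATH="$(go env GOPATH)"

  # Make the Go binaries and installed tools discoverable:
  export PATH="$PATH:$GOROOT/bin:$GOPATH/bin"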

Environment Setup⚓︎

Set up your environment by following the steps below.

Set Up Your IDE⚓︎

We recommend using GoLand and enabling the Kubernetes plugin. Before installing plugins, make sure to save your work because the IDE may require a restart.

Set Up Your Operator⚓︎

To set up the cloned operator, follow the three steps below:

  1. Configure Go Build Option. Open the folder in GoLand, click the add configuration button, and select the Go Build option:

    Add configuration
    Add configuration

  2. Fill in the variables in Configuration tab:

    • In the Files field, indicate the path to the main.go file;
    • In the Working directory field, indicate the path to the operator;
    • In the Environment field, specify the namespace to watch by setting the WATCH_NAMESPACE variable. It should equal default, but it can be any other namespace if required by the cluster specification.
    • In the Environment field, also specify the platform type by setting PLATFORM_TYPE. It should equal either kubernetes or openshift.

    Build config
    Build config

  3. Check cluster connectivity and variables. Local development implies working within local Kubernetes clusters. Kind (Kubernetes in Docker) is recommended, so set up this or another local environment before running the code.
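
As an illustration, the local loop from step 3 might look like this (the cluster name and the path to main.go are assumptions that depend on the operator's layout):

  # Create a local Kind cluster and verify connectivity:
  kind create cluster --name edp-dev
  kubectl cluster-info --context kind-edp-dev

  # Run the operator locally with the variables from step 2:
  WATCH_NAMESPACE=default PLATFORM_TYPE=kubernetes go run ./main.go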

Pre-commit Activities⚓︎

Before making a commit and sending a pull request, take precautionary measures to avoid breaking other parts of the code.

Testing and Linting⚓︎

Testing and linting must be used before every single commit with no exceptions. The instructions for the commands below are written here.

It is mandatory to run test and lint to make sure the code passes the tests and meets the acceptance criteria. Most operators are covered by tests, so just run them by issuing the "make test" and "make lint" commands:

  make test

The command "make test" should give the output similar to the following:

Tests directory for one of the operators
"make test" command

  make lint
 

The command "make lint" should give the output similar to the following:

Tests directory for one of the operators
"make lint" command

Observe Auto-Generated Docs, API and Manifests⚓︎

The commands below are especially essential when making changes to the API. The code is unsatisfactory if these commands fail.

  • Generate documentation in the .MD file format so the developer can read it:

    make api-docs
     

    The command "make api-docs" should give the output similar to the following:

"make api-docs" command with the file contents
"make api-docs" command with the file contents

  • There are also manifests within the operator that generate the zz_generated.deepcopy.go file in the /api/v1 directory. This file is necessary for the platform to work, but it is time-consuming to fill in by yourself, so there is a mechanism that does it automatically. Update it using the following command and check that it looks proper:

    make generate
     

    The command "make generate" should give the output similar to the following:

"make generate" command
"make generate" command

  • Refresh custom resource definitions for Kubernetes, thus allowing the cluster to know what resources it deals with.

    make manifests

    The command "make manifests" should give the output similar to the following:

"make manifests" command
"make manifests" command

At the end of the procedure, you can push your code confidently to your branch and create a pull request.

That's it, you're all set! Good luck in coding!


Working With Documentation⚓︎

This section defines necessary steps to start developing the EDP documentation in the MkDocs Framework. The framework presents a static site generator with documentation written in Markdown. All the docs are configured with a YAML configuration file.

Note

For more details on the framework, please refer to the MkDocs official website.

There are two options for working with MkDocs:

  • Work with MkDocs if Docker is installed
  • Work with MkDocs if Docker is not installed

Please see below the detailed description of each option and choose the one that suits you.

MkDocs With Docker⚓︎

Prerequisites:

  • Docker is installed.
  • make utility is installed.

To work with MkDocs, take the following steps:

  1. Clone the edp-install repository to your local folder.

  2. Run the following command:

    make docs

  3. Enter the localhost:8000 address in the browser and check that documentation pages are available.

  4. Open the file editor, navigate to edp-install->docs and make necessary changes. Check all the changes at localhost:8000.

  5. Create a merge request with changes.
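
Taken together, the Docker-based flow boils down to a few commands (assuming the repository is cloned from its public GitHub location):

  # Clone the repository and build/serve the docs in Docker:
  git clone https://github.com/epam/edp-install.git
  cd edp-install
  make docs
  # Then open localhost:8000 in the browser.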

MkDocs Without Docker⚓︎

Prerequisites:

  • Python with the pip package manager is installed.

To work with MkDocs without Docker, take the following steps:

  1. Clone the edp-install repository to your local folder.

  2. Run the following command:

    pip install -r hack/mkdocs/requirements.txt
  3. Run the local development command:

    mkdocs serve --dev-addr 0.0.0.0:8000

    Note

    This command may not work on Windows, so a quick solution is:

    python -m mkdocs serve --dev-addr 0.0.0.0:8000

  4. Enter the localhost:8000 address in the browser and check that documentation pages are available.

  5. Open the file editor, navigate to edp-install->docs and make necessary changes. Check all the changes at localhost:8000.

  6. Create a merge request with changes.

Reference Architecture⚓︎

The EPAM Delivery Platform’s (EDP) Reference Architecture serves as a blueprint for software delivery, outlining the best practices, tools, and technologies leveraged by the platform to ensure efficient and high-quality software development. It provides a comprehensive guide to navigate the complexities of software delivery, from code to deployment.

EDP operates on Kubernetes, a leading open-source system for automating deployment, scaling, and management of containerized applications. It consolidates a variety of open-source tools, ensuring a flexible and adaptable system that can seamlessly run on any public cloud or on-premises infrastructure. This versatility allows for a wide range of deployment options, catering to diverse business needs and operational requirements.

Key Principles⚓︎

The EPAM Delivery Platform (EDP) is built on a set of key principles that guide its design and functionality:

  • Managed Infrastructure and Container Orchestration: EDP is based on a platform that leverages managed infrastructure and container orchestration, primarily through Kubernetes or OpenShift.
  • Security: EDP places a high emphasis on security, covering aspects such as authentication, authorization, and Single Sign-On (SSO) for platform services.
  • Development and Testing Toolset: EDP provides a comprehensive set of tools for development and testing, ensuring a robust and reliable software delivery process.
  • Well-Established Engineering Process: EDP reflects EPAM’s well-established engineering practices (EngX) in its CI/CD pipelines and delivery analytics.
  • Open-Source and Cloud-Agnostic: As an open-source, cloud-agnostic solution, EDP can run on any preferred Kubernetes or OpenShift cluster.
  • DevSecOps Excellence: EDP empowers DevSecOps by making security a mandatory quality gate.
  • Automated Testing: EDP ensures seamless and predictable regression testing through automated test analysis.

Architecture Overview⚓︎

EDP encompasses a comprehensive CI/CD ecosystem integrating essential tools such as Tekton and Argo CD, augmented by additional functionalities. Within this robust framework, EDP seamlessly integrates SonarQube for continuous code quality assessment, enabling thorough analysis and ensuring adherence to coding standards. Additionally, incorporating a Static Application Security Testing (SAST) toolset fortifies the platform's security posture by proactively identifying vulnerabilities within the codebase. EDP leverages dedicated artifact storage solutions to manage and version application artifacts securely, ensuring streamlined deployment processes and traceability throughout the software development lifecycle. See the reference architecture diagram below:

EPAM Delivery Platform Reference Architecture

  1. Developers access the platform by authenticating with their corporate credentials. The platform utilizes OpenID Connect (OIDC) for authentication and authorization across all tools and Kubernetes clusters. Using OIDC, EDP establishes a unified and secure authentication mechanism, ensuring seamless access control and user authentication for all integrated tools. This standardized approach upholds strict security protocols, ensuring consistency in authentication and authorization policies across the platform ecosystem. To integrate existing Identity Providers (IdPs), Keycloak serves as an identity broker on the platform. EDP offers the keycloak-operator to streamline Keycloak integration within the platform.

  2. Developers engage with the platform via the EDP Portal, an intuitive interface offering a comprehensive overview of the platform’s capabilities. This centralized hub facilitates seamless navigation and access to various platform tools and components. Within the EDP Portal, developers can generate new components (codebases). The platform integrates with version control systems, optimizing source code management, fostering collaboration, and streamlining code review processes. To create new codebases, developers utilize Application Templates, ensuring a standardized approach to application development. The platform accommodates a range of application templates such as Java, Node.js, .NET, Python, and more. Additionally, developers can design custom templates via the EDP Marketplace to cater to their specific requirements.

  3. Tekton is a potent, adaptable, and cloud-native framework designed for crafting CI/CD systems. It offers a collection of shared, reusable components that empower developers to construct, test, and deploy applications across various cloud providers or on-premises systems. As a foundational element within the EDP CI/CD ecosystem, Tekton seamlessly integrates with other tools and services, providing a robust and adaptable framework for constructing CI/CD pipelines. Tekton Pipelines allow developers to efficiently build, test, and deploy applications, while Tekton Triggers initiate pipelines based on specific events.

  4. The codebase operator is a crucial part of the platform ecosystem. It manages codebases, their creation, deletion, and scaffolding, as well as their associated resources. It provides versioning, branching, and release capabilities and enables seamless integration with Git servers and Jira.

  5. The platform has various cloud-agnostic tools that offer different functionalities, such as artifact storage, static security analysis, and code quality assessment. These tools are accessible through pipelines and codebase controllers. Additionally, the platform supports integration with managed services from cloud providers to deliver its core functionality: for instance, AWS Parameter Store stores secrets, and AWS ECR stores container images. Azure DevOps Artifacts is an option to store artifacts leveraging Azure Cloud capabilities. SonarCloud, a cloud-based version of SonarQube, can be integrated to conduct static code analysis.

  6. The CD Pipeline Operator oversees CD pipelines and their related resources. It offers a collection of shared, reusable components for constructing CD pipelines. Integrated with Tekton and Argo CD, the CD Pipeline Operator harnesses their capabilities, ensuring a robust and dependable software delivery process. With a Kubernetes API interface, the CD Pipeline Operator facilitates the management of CD pipelines. It enables artifact promotion logic and the triggering of CD pipelines based on specific events, further enhancing the efficiency and adaptability of the software delivery workflow.

  7. Argo CD is a pivotal deployment tool adopted within the platform, embracing the GitOps delivery approach. It serves as the foundation for deploying both operational and business workloads. EDP recommends running a dedicated Argo CD instance to manage operational workloads, employing the Kubernetes add-ons approach for streamlined management.

  8. Production workloads operate in isolation within dedicated Kubernetes clusters to uphold stringent standards and ensure the utmost security and resource allocation. This approach guarantees the highest isolation and operational integrity levels for critical production systems, aligning with industry best practices. EDP strongly recommends utilizing a pull model for production deployment. In this model, production deployment is initiated by the Argo CD instance explicitly deployed for the production environment.

Technology Stack⚓︎

The Platform is meticulously engineered to uphold best practices in workload distribution across various environments, including development, testing (manual/automation), user acceptance (UAT), staging, and production. While lower environments like development and testing may feasibly share clusters for workload efficiency, EDP strongly advocates and enforces the necessity of segregating production workloads into dedicated clusters. This segregation ensures the highest isolation, security, and resource allocation levels for mission-critical production systems, adhering to industry standards and ensuring optimal operational integrity.

EDP harnesses the robust capabilities of Kubernetes in conjunction with a suite of powerful tools tailored for monitoring, logging, and tracing. It integrates the Prometheus stack within its ecosystem, leveraging its metrics collection, storage, and querying capabilities to enable comprehensive monitoring of system performance and health. EDP runs OpenSearch for centralized logging, enabling efficient log aggregation, analysis, and management across the platform. Incorporating OpenTelemetry enables standardized and seamless observability data collection, facilitating deep insights into platform behavior and performance. Additionally, it allows for connection with external aggregators and tools that support the OpenTelemetry protocol (OTLP).

Platform and Tools

EDP integrates with GitLab, GitHub, and Gerrit for version control. These systems are foundational components enabling efficient source code management, collaboration, and code review processes.

The platform ensures robust security measures by leveraging OpenID Connect (OIDC) for authentication and authorization across all platform tools and Kubernetes clusters. By employing OIDC, EDP establishes a unified and secure authentication mechanism, enabling seamless access control and user authentication for all tools integrated into the platform. This standardized approach ensures stringent security protocols, maintaining authentication consistency and authorization policies across the platform ecosystem.

Reference CI/CD Pipeline⚓︎

This document provides an in-depth overview of the Continuous Integration and Continuous Delivery (CI/CD) pipeline reference architecture implemented within the EPAM Delivery Platform (EDP). The pipeline is designed to facilitate efficient and automated software deployment across diverse environments, leveraging a suite of tools and methodologies for enhanced reliability, scalability, and security.

CI/CD Pipeline Architecture⚓︎

The CI/CD pipeline within EDP orchestrates the software delivery process, encompassing several sequential stages to ensure robustness and reliability.

EPAM Delivery Platform Reference CI/CD Pipeline

The CI/CD Pipeline follows a modular and scalable architecture that leverages various tools to ensure the reliability and efficiency of the software delivery process. The architecture can be divided into stages, each responsible for specific tasks. Explore the key components involved in the pipeline and their functionalities:

  1. Source Code: The pipeline starts with the source code, representing the application's codebase. Developers commit their changes to the source code repository, triggering the pipeline.

  2. Validate Commit Message: The commit message validation component checks the format and content of the commit message. It ensures the commit message follows the correct format and includes a valid Tracking Issue key. It helps maintain a standardized commit message format throughout the application development (see the sketch after this list).

  3. Build: The Build component compiles the source code, runs unit tests, and generates the application artifact. It consumes the artifact from the Artifact Repository (Nexus), ensuring consistent and reliable builds.

  4. SAST with SonarQube: The Static Application Security Testing (SAST) component utilizes SonarQube to analyze the source code for potential security vulnerabilities, code smells, and quality issues. This step helps identify and address security or code quality issues early in development.

  5. SCA: The Software Composition Analysis (SCA) component performs dependency analysis using cdxgen, Dependency-Track, Semgrep, and DefectDojo. It checks for known vulnerabilities or license compliance issues in the application's dependencies. By identifying and resolving these issues, it ensures the security and stability of the software.

  6. Publish: The Publish component publishes the application artifact to the Artifact Repository. It posts Docker images to the Docker Registry and stores binary artifacts in the Nexus Repository. This process ensures that the artifacts are securely stored and easily accessed for future deployments.

  7. Deploy: The Deploy component uses Argo CD or Tekton to deploy applications to target environments in Kubernetes, leveraging Helm charts to ensure seamless deployment. The final stages of the pipeline involve deploying the application to Test, Quality Assurance, and Performance environments. The results of the tests are consolidated and reported to the Report Portal, facilitating efficient test reporting and analysis.
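
A minimal sketch of the kind of check the Validate Commit Message step performs, reusing the commit pattern from the project working-process rules (the actual pipeline task may implement validation differently):

  # Illustrative only: fail if the last commit subject does not match
  # "<commit type>: <Message> (#<ticket number>)":
  msg="$(git log -1 --pretty=%s)"
  echo "$msg" | grep -Eq '^(feat|fix|docs|style|refactor|test|chore)(!)?: [A-Z].+ \(#[0-9]+\)$' \
    || { echo "Commit message does not follow the required pattern: $msg"; exit 1; }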

The overall architecture of the CI/CD Pipeline ensures a streamlined and automated software delivery process, from source code to deployment. It provides developers with the necessary tools and processes to ensure their applications' quality, security, and scalability. Furthermore, Tekton Chains enhances supply chain security by signing and generating in-toto metadata that verifies the integrity of artifacts and the CI/CD Pipeline.

Note

The tools mentioned in this document are just examples and can be replaced with other tools that offer similar functionality. For instance, instead of Harbor for the Docker Registry, it is possible to use AWS ECR. Consider using Azure Artifacts or JFrog Artifactory instead of Nexus for the artifact repository. Instead of setting up a self-managed instance of SonarQube, leverage SonarCloud, the cloud-based version of SonarQube, as an alternative. The CI/CD Pipeline architecture is flexible and adaptable, allowing the use of different tools based on specific project requirements and platform preferences.

    \ No newline at end of file + Reference CI/CD Pipeline - EPAM Delivery Platform
    Skip to content

    Reference CI/CD Pipeline⚓︎

    This document provides an in-depth overview of the Continuous Integration and Continuous Delivery (CI/CD) pipeline reference architecture implemented within the EPAM Delivery Platform (EDP). The pipeline is designed to facilitate efficient and automated software deployment across diverse environments, leveraging a suite of tools and methodologies for enhanced reliability, scalability, and security.

    CI/CD Pipeline Architecture⚓︎

    The CI/CD pipeline within EDP orchestrates the software delivery process, encompassing several sequential stages to ensure robustness and reliability.

    EPAM Delivery Platform Reference CI/CD Pipeline

    The CI/CD Pipeline follows a modular and scalable architecture that leverages various tools to ensure the reliability and efficiency of the software delivery process. The architecture can be divided into stages, each responsible for specific tasks. Explore the key components involved in the pipeline and their functionalities:

    1. Source Code: The pipeline starts with the source code, representing the application's codebase. Developers commit their changes to the source code repository, triggering the pipeline.

    2. Validate Commit Message: The commit message validation component checks the format and content of the commit message. It ensures the message follows the required format and includes a valid Tracking Issue key, which helps maintain a standardized commit message format throughout development.

    3. Build: The Build component compiles the source code, runs unit tests, and generates the application artifact. It resolves dependencies from the Artifact Repository (Nexus), ensuring consistent and reliable builds.

    4. SAST with SonarQube: The Static Application Security Testing (SAST) component utilizes SonarQube to analyze the source code for potential security vulnerabilities, code smells, and quality issues. This step helps identify and address security or code quality issues early in development.

    5. SCA: The Software Composition Analysis (SCA) component performs dependency analysis using cdxgen, Dependency-Track, Semgrep, and DefectDojo. It checks for known vulnerabilities or license compliance issues in the application's dependencies. By identifying and resolving these issues, it ensures the security and stability of the software.

    6. Publish: The Publish component publishes the application artifact to the Artifact Repository. It pushes Docker images to the Docker Registry and stores binary artifacts in the Nexus Repository. This process ensures that the artifacts are securely stored and easily accessible for future deployments.

    7. Deploy: The Deploy component uses Argo CD or Tekton to deploy applications to target environments in Kubernetes, leveraging Helm charts for repeatable deployments. The final stages of the pipeline deploy the application to the Test, Quality Assurance, and Performance environments for testing and quality assurance purposes; test results are consolidated and reported to ReportPortal, facilitating efficient test reporting and analysis. A simplified wiring of these stages is sketched below.
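
    To make the flow concrete, here is a hedged sketch of how these stages could be wired together as a Tekton Pipeline. The task names and parameters are hypothetical illustrations, not the platform's actual task library:

      apiVersion: tekton.dev/v1
      kind: Pipeline
      metadata:
        name: reference-build              # hypothetical name
      spec:
        params:
          - name: git-url
            type: string
        tasks:
          - name: validate-commit          # checks format and the Tracking Issue key
            taskRef:
              name: commit-validate        # hypothetical task names throughout
          - name: build                    # compile, run unit tests, produce the artifact
            runAfter: ["validate-commit"]
            taskRef:
              name: build
          - name: sast                     # SonarQube static analysis
            runAfter: ["build"]
            taskRef:
              name: sonarqube-scanner
          - name: sca                      # dependency analysis (SBOM, vulnerability check)
            runAfter: ["build"]
            taskRef:
              name: dependency-track
          - name: publish                  # push the image and binary artifacts
            runAfter: ["sast", "sca"]
            taskRef:
              name: publish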

    The overall architecture of the CI/CD Pipeline ensures a streamlined and automated software delivery process, from source code to deployment. It provides developers with the necessary tools and processes to ensure their applications' quality, security, and scalability. Furthermore, Tekton Chains enhances supply chain security by signing and generating in-toto metadata that verifies the integrity of artifacts and the CI/CD Pipeline.
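
    As an illustration of the Tekton Chains behavior described above, provenance format and transparency logging are typically controlled through the chains-config ConfigMap. A minimal sketch, assuming a default Tekton Chains installation; exact values depend on your setup:

      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: chains-config
        namespace: tekton-chains
      data:
        artifacts.taskrun.format: in-toto   # generate in-toto provenance for TaskRuns
        artifacts.oci.storage: oci          # attach signatures to the OCI image
        transparency.enabled: "true"        # record entries in a transparency log (Rekor)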

    Note

    The tools mentioned in this document are just examples and can be replaced with other tools that offer similar functionality. For instance, instead of Harbor for the Docker Registry, it is possible to use AWS ECR. Consider using Azure Artifacts or JFrog Artifactory instead of Nexus for the artifact repository. Instead of setting up a self-managed instance of SonarQube, leverage SonarCloud, the cloud-based version of SonarQube, as an alternative. The CI/CD Pipeline architecture is flexible and adaptable, allowing the use of different tools based on specific project requirements and platform preferences.

    Telemetry⚓︎

    The codebase operator for the EPAM Delivery Platform gathers anonymous data through telemetry. This data provides a clear picture of how the platform is being used and empowers the development team to make informed decisions and strategic enhancements to meet evolving operational needs. The collected anonymous data also plays an essential role in strategically shaping the Software Development Life Cycle (SDLC) process.

    Telemetry Data⚓︎

    The codebase-operator collects the following data:

    • The version of the platform
    • The number of codebases created and their parameters: language (for example, Java, NodeJS), framework (for example, FastAPI, Flask), build tool (for example, Maven, Gradle), strategy (for example, Clone, Create, Import), and type (for example, library, application)
    • The number of CD pipelines created and their parameters: deployment type (for example, Auto, Manual), and the number of stages
    • The number of Git providers connected to the platform and their types (for example, GitHub, GitLab, Gerrit)
    • Whether Jira is enabled or not
    • The type of the Docker registry connected to the platform (for example, Docker Hub, Harbor, ECR)
    package telemetry

    type CodebaseMetrics struct {
      Lang       string `json:"lang"`
      ...
      Version           string              `json:"version"`
    }
     

    You can review the code that collects the data in the codebase-operator repository.

    Collecting Timeline⚓︎

    The codebase-operator collects the data every 24 hours and sends it to the EDP Telemetry Service. The first data points are collected 24 hours after the codebase-operator is deployed, which gives users time to opt out of telemetry.

    Disabling Telemetry⚓︎

    The codebase-operator collects telemetry data by default. To disable telemetry, set the TELEMETRY_ENABLED environment variable to false in the codebase-operator's deployment configuration. To achieve this, run the following command:

    helm upgrade --install codebase-operator codebase-operator/codebase-operator --set "telemetryEnabled=false"
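
    Alternatively, the same flag can be set in the chart's values file. A minimal sketch, assuming the key name mirrors the --set flag above:

      # values.yaml for the codebase-operator chart
      telemetryEnabled: false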

    FAQ⚓︎

    How Do I Set Parallel Reconciliation for a Number of Codebase Branches?⚓︎

    Set the CODEBASE_BRANCH_MAX_CONCURRENT_RECONCILES environment variable in the codebase-operator by updating its Deployment template. For example:

          ...
           env:
             - name: WATCH_NAMESPACE
               ...
             - name: CODEBASE_BRANCH_MAX_CONCURRENT_RECONCILES
               value: "10"
          ...
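
    For context, here is a minimal, illustrative Deployment manifest showing where these variables live. The image tag and the WATCH_NAMESPACE source are placeholders, not the platform's actual manifest:

      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: codebase-operator
      spec:
        selector:
          matchLabels:
            app: codebase-operator
        template:
          metadata:
            labels:
              app: codebase-operator
          spec:
            containers:
              - name: codebase-operator
                image: epamedp/codebase-operator:latest   # placeholder tag
                env:
                  - name: WATCH_NAMESPACE
                    valueFrom:
                      fieldRef:
                        fieldPath: metadata.namespace     # a common pattern; the real source may differ
                  - name: CODEBASE_BRANCH_MAX_CONCURRENT_RECONCILES
                    value: "10"                           # env values must be strings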

    It's not recommended to set the value above 10.

    How To Change the Lifespan of an Access Token That Is Used for EDP Portal and 'oidc-login' Plugin?⚓︎

    Change the Access Token Lifespan: go to your Keycloak instance and select the OpenShift realm > Realm settings > Tokens > Access Token Lifespan, set a new value for the field, and save the change.

    By default, the "Access Token Lifespan" value is 5 minutes.

    Access Token Lifespan


    Basic Concepts⚓︎

    Consult the EDP Glossary section for definitions mentioned on this page, and see EDP Toolset for a full list of tools used with the Platform. The table below lists the features provided by EDP.

    Features Description
    Cloud Agnostic - EDP runs on a Kubernetes cluster, so any Public Cloud Provider offering Kubernetes can be used; Kubernetes clusters deployed on-premises work as well.
    CI/CD for Microservices - EDP is initially designed to support CI/CD for Microservices running as containerized applications inside a Kubernetes Cluster. EDP also supports CI for:
    - Terraform Modules,
    - Open Policy Rules,
    - Workflows for Java (8, 11, 17), JavaScript (React, Vue, Angular, Express, Antora), C# (.NET 6.0), Python (FastAPI, Flask, 3.8), Go (Beego, Operator SDK)
    Version Control System (VCS) - EDP installs Gerrit as the default Source Code Management (SCM) tool. EDP also supports GitHub and GitLab integration.
    Branching Strategy - EDP supports Trunk-based development as well as GitHub/GitLab flow. EDP creates two Pipelines for each codebase branch: Code Review and Build.
    Repository Structure - EDP provides a separate Git repository for each Codebase and doesn't work with a Monorepo. However, EDP does support customization and runs helm-lint and dockerfile-lint steps using the Monorepo approach.
    Artifacts Versioning - EDP supports two approaches to artifact versioning:
    - default (BRANCH-[TECH_STACK_VERSION]-BUILD_ID)
    - EDP (MAJOR.MINOR.PATCH-BUILD_ID), which is SemVer.
    Custom versioning can be created by implementing the get-version stage.
    Application Library - EDP provides baseline codebase templates for Microservices and Libraries within the Create strategy while onboarding a new Codebase.
    Stages Library - Each EDP Pipeline consists of pre-defined steps (stages).
    CI Pipelines - EDP provides CI Pipelines for first-class citizens:
    - Applications (Microservices) based on Java (8, 11, 17), JavaScript (React, Vue, Angular, Express, Antora, Next.js), C# (.NET 3.1, .NET 6.0), Python (FastAPI, Flask, 3.8), Go (Beego, Gin, Operator SDK), Helm (Charts, Pipeline)
    - Libraries based on Java (8, 11, 17), JavaScript (React, Vue, Angular, Express), Python (FastAPI, Flask, 3.8), Groovy Pipeline (Codenarc), HCL (Terraform), Rego (OPA), Container (Docker), Helm (Charts, Pipeline), C# (.NET 3.1, .NET 6.0)
    - Autotests based on Java 8, Java 11, Java 17
    CD Pipelines - EDP provides capabilities to design CD Pipelines (in the EDP Portal UI) for Microservices and defines the logic of artifact flow (promotion) from environment to environment. Artifact promotion is performed automatically (Autotests), manually (User Approval), or by combining both approaches.
    Autotests - EDP provides a CI pipeline for autotests implemented in Java. Autotests can be used as Quality Gates in CD Pipelines.
    Custom Pipeline Library - EDP can be extended by introducing a Custom Pipeline Library.
    Dynamic Environments - Each EDP CD Pipeline creates/destroys environments upon user request.

    Glossary⚓︎

    Get familiar with the definitions and context for the most useful EDP terms presented in the table below.

    Terms Details
    EDP Component - an item used in the CI/CD process:
    EDP Portal UI - an EDP component that helps to manage, set up, and control the business entities.
    Artifactory - an EDP component that stores all the binary artifacts. NOTE: Nexus is used as a possible implementation of a repository.
    CI/CD Server - an EDP component that launches pipelines that perform the build, QA, and deployment code logic. NOTE: Tekton is used as a possible implementation of a CI/CD server.
    Code Review tool - an EDP component that collaborates with the changes in the codebase. NOTE: Gerrit is used as a possible implementation of a code review tool.
    Identity Server - an authentication server providing a common way to verify requests to all of the applications. NOTE: Keycloak is used as a possible implementation of an identity server.
    Security Realm Tenant - a realm in the identity server (e.g., Keycloak) where all users' accounts and their access permissions are managed. The realm is unique for the identity server instance.
    Static Code Analyzer - an EDP component that continuously inspects code quality before the changes appear in the master branch. NOTE: SonarQube is used as a possible implementation of a static code analyzer.
    VCS (Version Control System) - a replication of the Gerrit repository that displays all the changes made by developers. NOTE: GitHub and GitLab are used as the possible implementation of a repository with the version control system.
    EDP Business Entity - a part of the CI/CD process (the integration, delivery, and deployment of any codebase changes):
    Application - a codebase type that is built as a binary artifact and deployable unit with the code that is stored in VCS. As a result, the application becomes a container and can be deployed in an environment.
    Autotests - a codebase type that inspects a product (e.g. an application set) on a stage. Autotests are not deployed to any container and launched from the respective code stage.
    CD Pipeline (Continuous Delivery Pipeline) - an EDP business entity that describes the whole delivery process of the selected application set via the respective stages. The main idea of the CD pipeline is to promote the application version between the stages by applying the sequential verification (i.e. the second stage will be available if the verification on the first stage is successfully completed). NOTE: The CD pipeline can include the essential set of applications with its specific stages as well.
    CD Pipeline Stage - an EDP business entity that is presented as the logical gate required for the application set inspection. Every stage has one OpenShift project where the selected application set is deployed. All stages are sequential and promote applications one-by-one.
    Codebase - an EDP business entity that contains code.
    Codebase Branch - an EDP business entity that represents a specific version in a Git branch. Every codebase branch has a Codebase Docker Stream entity.
    Codebase Docker Stream - a deployable component that leads to the application build and displays that the last build was verified on the specific stage. Every CD pipeline stage accepts a set of Codebase Docker Streams (CDS) that are input and output. SAMPLE: if an application1 has a master branch, the input CDS will be named as [app name]-[pipeline name]-[stage name]-[master] and the output after the passing of the DEV stage will be as follows: [app name]-[pipeline name]-[stage name]-[dev]-[verified].
    Git Server - a custom resource that is responsible for integration with Version Control System (VCS), whether it is GitHub, GitLab or Gerrit.
    Infrastructure - a codebase type that is used to define and manage the underlying infrastructure of projects using the Infrastructure as Code (IaC) approach, ensuring consistency and reproducibility.
    Library - a codebase type that is built as a binary artifact, i.e. it is stored in the Artifactory and can be consumed by other applications, autotests, or libraries.
    Quality Gate - an EDP business entity that represents the minimum acceptable results after testing. Every stage has a quality gate that should be passed to promote the application. The stage quality gate can be a manual approval from a QA specialist OR a successful autotest launch.
    Quality Gate Type - a value that defines the trigger type that promotes artifacts (images) to the next environment in a CD Pipeline. There are manual and automatic types of quality gates. The manual type means that the promotion process should be confirmed in Tekton. The automatic type promotes the images automatically if there are no errors in the Allure Report. NOTE: If any of the test types is not passed, the CD pipeline will fail.
    Trigger Type - a value that defines the trigger type used for CD pipeline triggering. There are manual and automatic types of triggering. The manual type means that the CD pipeline should be triggered manually. The automatic type triggers the CD pipeline automatically as soon as the Codebase Docker Stream is changed.
    Automated Tests - different types of automated tests that can be run on the environment for a specific stage.
    Build Pipeline - a Tekton pipeline that builds a corresponding codebase branch in the Codebase.
    Build Stage - a stage that takes place after the code has been submitted/merged to the repository of the main branch (the pull request from the feature branch is merged to the main one, the Patch set is submitted in Gerrit).
    Code Review Pipeline - a Tekton pipeline that inspects the code candidate in the Code Review tool.
    Code Review Stage - a stage where code is reviewed before it goes to the main branch repository of the version control system (the commit to the feature branch is pushed, the Patch set is created in Gerrit).
    Deploy Pipeline - a Tekton pipeline that is responsible for the CD Pipeline Stage deployment with the full set of applications and autotests.
    Deployment Stage - a part of the Continuous Delivery where artifacts are being deployed to environments.
    EDP CI Pipelines - an orchestrator for stages that is responsible for the common technical events, e.g. initialization, in Tekton pipeline.
    Environment - a part of the stage where the application, built and packed into an image, is deployed for further testing. It is possible to deploy several applications to several environments (Team and Integration environments) within one stage.
    Team Environment - an environment type that can be deployed at any time by a manual trigger of the Deploy pipeline so that the team or developers can check out their applications. NOTE: Promotion from this kind of environment is prohibited; it is intended only for local testing.
    OpenShift / Kubernetes (K8S):
    ConfigMap - a resource that stores configuration data and processes the strings that do not contain sensitive information.
    Docker Container - a lightweight, standalone, and executable package.
    Docker Registry - a store for the Docker Container that is created for the application after the Build pipeline runs.
    OpenShift Web Console - a web console that enables viewing, managing, and changing OpenShift / K8S resources in a browser.
    Operator Framework - a deployable unit in OpenShift that is responsible for one or a set of resources and manages their life cycle (adding, displaying, and provisioning).
    Path - a route component that helps to match a specified path (e.g. /api) at once and skip the others.
    Pod - the smallest deployable unit of a large microservice application that is responsible for the application launch. The pod is presented as one launched Docker container. When the Docker container is built, it is kept in the Docker Registry and then run as a Pod in the OpenShift project. NOTE: The Deployment Config is responsible for the Pod push, restart, and stop processes.
    PV (Persistent Volume) - a cluster resource that captures the details of the storage implementation and has an independent lifecycle of any individual pod.
    PVC (Persistent Volume Claim) - a user request for storage that can request specific size and access mode. PV resources are consumed by PVCs.
    Route - a resource in OpenShift that provides external access to the deployed application.
    Secret - an object that stores and manages all the sensitive information (e.g. passwords, tokens, and SSH keys).
    Service - a connection point for Pods that is responsible for the network. A specific Service is connected to a specific Pod using labels and redirects all requests to that Pod.
    Site - a route component (link name) that is created from the indicated application name and automatically applies the project name and a wildcard DNS record.

    Build your delivery rocket

    Boost your delivery with the development culture based on the modern CI/CD stack, golden path and self-service capabilities of the EPAM Delivery Platform (EDP).

    Getting started Request a demo Watch teaser

    Install via Add-Ons⚓︎

    This page describes Cluster Add-Ons for the EPAM Delivery Platform: their purpose, benefits, and usage.

    What Are Add-Ons⚓︎

    EDP Add-Ons are a Kubernetes-based mechanism that enables users to quickly install additional components for the platform using Argo CD applications.

    Add-Ons were introduced in EDP starting from version 3.4.0. They allow users to seamlessly extend the platform with additional components, such as SonarQube, Nexus, Keycloak, Jira, and more, eliminating the need for the manual installations outlined in the Install EDP page.

    In a nutshell, Add-Ons are separate Helm charts that can be installed with one click using Argo CD.

    Add-Ons Repository Structure⚓︎

    All the Add-Ons are stored in our public GitHub repository. Apart from default Helm and Git files, it contains custom resources called Applications for Argo CD as well as application source code. The repository follows the GitOps approach, enabling Add-Ons to roll back changes when needed. The repository structure is the following:

      ├── CHANGELOG.md
       ├── LICENSE
       ├── Makefile
       ├── README.md
       ├── add-ons
       └── chart
    • add-ons - the directory that contains Helm charts of the applications that can be integrated with EDP using Add-Ons.
    • chart - the directory that contains Helm charts with application templates that will be used to create custom resources called Applications for Argo CD.

    Enable EDP Add-Ons⚓︎

    To enable EDP Add-Ons, you need a configured Argo CD instance and a connected, synchronized fork of the Add-Ons repository. To do this, follow the guidelines below:

    1. Fork the Add-Ons repository to your personal account.

    2. Provide the parameter values for the values.yaml files of the desired Add-Ons you are going to install.

    3. Navigate to Argo CD -> Settings -> Repositories. Connect your forked repository (with the changed values.yaml files) by clicking the + Connect repo button:

      Connect the forked repository

    4. In the window that appears, fill in the following fields and click the Connect button:

      • Name - select the namespace where the project is going to be deployed;
      • Choose your connection method - choose Via SSH;
      • Type - choose Helm;
      • Repository URL - enter the URL of your forked repository.

      Repository parameters

    5. As soon as the repository is connected, a new item will appear in the repository list:

      Connected repository

    6. Navigate to Argo CD -> Applications. Click the + New app button:

      Adding Argo CD application

    7. Fill in the required fields (a declarative equivalent of the resulting Application is sketched after these steps):

      • Application Name - addons-demo;
      • Project name - select the namespace where the project is going to be deployed;
      • Sync policy - select Manual;
      • Repository URL - enter the URL of your forked repository;
      • Revision - HEAD;
      • Path - select chart;
      • Cluster URL - enter the URL of your cluster;
      • Namespace - enter the namespace which must be equal to the Project name field.
    8. As soon as the repository is synchronized, the list of applications that can be installed by Add-Ons will be shown:

      Add-Ons list
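
    For reference, the values entered in the form above map to an Argo CD Application manifest along these lines. This is a hedged sketch; the repository URL, project, and namespaces are placeholders:

      apiVersion: argoproj.io/v1alpha1
      kind: Application
      metadata:
        name: addons-demo
        namespace: argocd
      spec:
        project: edp                                   # your project/namespace name
        source:
          repoURL: git@github.com:myorg/edp-cluster-add-ons.git  # your forked repository (SSH)
          targetRevision: HEAD
          path: chart
        destination:
          server: https://kubernetes.default.svc       # your cluster URL
          namespace: edp                               # must equal the project name
        syncPolicy: {}                                 # empty means manual sync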

    Install EDP Add-Ons⚓︎

    Now that Add-Ons are enabled in Argo CD, they can be installed by following the steps below:

    1. Choose the Add-On to install.

    2. On the chosen Add-On, click the ⋮ button and then Details:

      Open the Add-On

    3. To install the Add-On, click the ⋮ button -> Sync:

      Install Add-Ons

    4. Once the Add-On is installed, the Sync OK message will appear in the Add-On status bar:

      Sync OK message

    5. Open the application details by clicking on the little square with an arrow underneath the Add-On name:

      Open details

    6. Track application resources and status in the App details menu:

      Application details

    As you can see, Argo CD offers strong observability and monitoring tools for its resources, which come in handy when using EDP Add-Ons.

    Available Add-Ons List⚓︎

    The list of the available Add-Ons:

    Name Description Default
    Argo CD A GitOps continuous delivery tool that helps automate the deployment, configuration, and lifecycle management of applications in Kubernetes clusters. false
    AWS EFS CSI Driver A Container Storage Interface (CSI) driver that enables the dynamic provisioning of Amazon Elastic File System (EFS) volumes in Kubernetes clusters. true
    Cert Manager A native Kubernetes certificate management controller that automates the issuance and renewal of TLS certificates. true
    DefectDojo A security vulnerability management tool that allows tracking and managing security findings in applications. true
    DependencyTrack A Software Composition Analysis (SCA) platform that helps identify and manage open-source dependencies and their associated vulnerabilities. true
    EDP An internal platform created by EPAM to enhance software delivery processes using DevOps principles and tools. false
    Extensions OIDC EDP Helm chart to provision OIDC clients for different Add-Ons using EDP Keycloak Operator. true
    External Secrets A Kubernetes Operator that fetches secrets from external secret management systems and injects them as Kubernetes Secrets. true
    Fluent Bit A lightweight and efficient log processor and forwarder that collects and routes logs from various sources in Kubernetes clusters. false
    Harbor A cloud-native container image registry that provides support for vulnerability scanning, policy-based image replication, and more. true
    Nginx ingress An Ingress controller that provides external access to services running within a Kubernetes cluster using Nginx as the underlying server. true
    Jaeger Operator An operator for deploying and managing Jaeger, an end-to-end distributed tracing system, in Kubernetes clusters. true
    Keycloak An open-source Identity and Access Management (IAM) solution that enables authentication, authorization, and user management in Kubernetes clusters. true
    Keycloak PostgreSQL A PostgreSQL database operator that simplifies the deployment and management of PostgreSQL instances in Kubernetes clusters. false
    MinIO Operator An operator that simplifies the deployment and management of MinIO, a high-performance object storage server compatible with Amazon S3, in Kubernetes clusters. true
    OpenSearch A community-driven, open-source search and analytics engine that provides scalable and distributed search capabilities for Kubernetes clusters. true
    OpenTelemetry Operator An operator for automating the deployment and management of OpenTelemetry, a set of observability tools for capturing, analyzing, and exporting telemetry data. true
    PostgreSQL Operator An operator for running and managing PostgreSQL databases in Kubernetes clusters with high availability and scalability. true
    Prometheus Operator An operator that simplifies the deployment and management of Prometheus, a monitoring and alerting toolkit, in Kubernetes clusters. true
    Redis Operator An operator for managing Redis, an in-memory data structure store, in Kubernetes clusters, providing high availability and horizontal scalability. true
    StorageClass A Kubernetes resource that provides a way to define different classes of storage with different performance characteristics for persistent volumes. true
    Tekton A flexible and cloud-native framework for building, testing, and deploying applications using Kubernetes-native workflows. true
    Vault An open-source secrets management solution that provides secure storage, encryption, and access control for sensitive data in Kubernetes clusters. true

    Advanced Installation Overview⚓︎

    This page serves as a brief overview of all the advanced components within EDP. While these third-party tools are not mandatory, they significantly enhance the platform's capabilities, enabling the creation of a robust CI/CD environment.

    EDP Third-Party Components⚓︎

    Find below the list of the key components used by EPAM Delivery Platform:

    Component Requirement Level Cluster
    Tekton Mandatory
    Argo CD Mandatory
    NGINX Ingress Controller [1] Mandatory
    Keycloak Optional
    DefectDojo Optional
    ReportPortal Optional
    Kiosk [2] Optional
    Capsule [2] Optional
    External Secrets Optional
    Nexus Optional
    Harbor Optional

    Although we provide dedicated instructions for all of these third-party tools, if you installed EDP using Cluster Add-Ons, we recommend installing them via the corresponding Add-Ons.


    [1] An OpenShift cluster uses Routes to provide access to pods from external resources.

    [2] These tools need to be installed before deploying EDP.


    Argo CD Integration⚓︎

    KubeRocketCI uses Argo CD as part of its Continuous Delivery/Continuous Deployment implementation. Argo CD follows GitOps best practices, uses a Kubernetes-native approach to deployment management, and provides a rich UI and the required RBAC capabilities.

    Argo CD Deployment Approach in KubeRocketCI⚓︎

    Argo CD can be installed using two different approaches:

    • Cluster-wide scope with the cluster-admin access
    • Namespaced scope with the single namespace access

    Both approaches can be deployed with High Availability (HA) or non-High Availability (non-HA) installation manifests.

    KubeRocketCI uses the HA deployment with cluster-admin permissions to minimize cluster resource consumption by sharing a single Argo CD instance across multiple EDP Tenants. Please follow the installation instructions to deploy Argo CD.

    Argo CD Integration⚓︎

    See a diagram below for the details:

    Argo CD Diagram

    • Argo CD is deployed in a separate argocd namespace.
    • Argo CD uses a cluster-admin role for managing cluster-scope resources.
    • The control-plane application is created using the App of Apps approach, and its code is managed by the control-plane members.
    • The control-plane is used to onboard new Argo CD Tenants (Argo CD Projects - AppProject).
    • The EDP Tenant Member manages Argo CD Applications using kind: Application in the edpTenant namespace.
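    To illustrate the onboarding result, an Argo CD AppProject for a tenant might look like the sketch below (the project name, destination pattern, and repository URL are assumptions for illustration, not values taken from this page):

      apiVersion: argoproj.io/v1alpha1
      kind: AppProject
      metadata:
        name: edp                        # one AppProject per EDP Tenant (assumed name)
        namespace: argocd                # Argo CD runs in its own namespace
      spec:
        description: EDP Tenant project
        destinations:
          - namespace: edp-*             # environments this tenant may deploy to
            server: https://kubernetes.default.svc
        sourceRepos:
          - https://github.com/example/tenant-gitops.git   # hypothetical GitOps repository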

    The App Of Apps approach is used to manage the EDP Tenants. Inspect the edp-grub repository structure that is used to provide the EDP Tenants for the Argo CD Projects:

    edp-grub
     ├── LICENSE
     ├── README.md
     ├── apps                      ### All Argo CD Applications are stored here
     ...
             # Keycloak Group name
             - ArgoCD-edp-users
     
  • Then restart the deployment:

    kubectl -n argocd rollout restart deployment argo-argocd-server
    Verification of EDP Artifacts⚓︎

    This documentation outlines the platform's SLSA integration and guides you through verifying image authenticity and provenance.

    Software supply chain security is a critical aspect of modern software development and deployment. Supply Chain Levels of Software Assurance (SLSA) provides a framework for assessing and enhancing the security of your software supply chain.

    Prerequisites⚓︎

    Ensure you have installed rekor-cli and cosign on your environment before proceeding.


    Release Assets⚓︎

    The table below represents a list of EDP components with corresponding images that are signed and pushed to DockerHub:

    Asset Description
    codebase-operator Docker Image
    edp-headlamp Docker Image
    edp-tekton Docker Image
    cd-pipeline-operator Docker Image
    gerrit-operator Docker Image
    edp-gerrit Docker Image

    Verify Container Images⚓︎

    EPAM Delivery Platform's container images are signed using cosign; the corresponding cosign.pub public key is published for verification and transparency. You can verify a container image's signature by executing the cosign verify command.

    To confirm the authenticity of the image, run the cosign verify command. See the example below:

    cosign verify  --key https://raw.githubusercontent.com/epam/edp-install/master/cosign.pub epamedp/codebase-operator:2.20.0 | jq .
     

    Verification for epamedp/codebase-operator:2.20.0:

    Verification for index.docker.io/epamedp/codebase-operator:2.20.0
     The following checks were performed on each of these signatures:
       - The cosign claims were validated
    ...
         ]
       }
     }
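    The signature and provenance records are also published to the public Rekor transparency log, which is why rekor-cli is listed in the prerequisites. A minimal lookup sketch (the digest and UUID are placeholders):

      # Find transparency-log entries for an image digest (placeholder value)
      rekor-cli search --sha sha256:<image-digest>
      # Inspect one of the returned entries by its UUID
      rekor-cli get --uuid <entry-uuid> --format json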

    By signing all our artifacts, we assure you that they are trustworthy. This guide helps developers and administrators enhance their software's reliability and meet modern security standards. Adopting SLSA will give you confidence while using the platform.


      Install via AWS Marketplace⚓︎

      This documentation provides the detailed instructions on how to install the EPAM Delivery Platform via the AWS Marketplace.

      To initiate the installation process, navigate to our dedicated AWS Marketplace page and commence the deployment of EPAM Delivery Platform.

      Disclaimer

      EDP is aligned with industry standards for storing and managing sensitive data, ensuring optimal security. However, the use of custom solutions introduces uncertainties; therefore, responsibility for the safety of your data rests entirely with the platform administrator.

      Prerequisites⚓︎

      Notice

      • A basic understanding of AWS services and navigation is preferred to facilitate smoother setup and deployment processes. If you are new to AWS, please refer to the AWS Documentation for detailed information on the services and their usage.
      • Understanding of Kubernetes: Knowledge of Kubernetes concepts and architecture is recommended for effective management and operation of clusters.

      Please familiarize yourself with the Prerequisites page before deploying the product. To perform a minimal installation, ensure that you meet the following requirements:

      • The AWS Elastic Kubernetes Service (EKS) cluster is available for deployment. For detailed instructions on creating a new cluster, please consult the AWS EKS Cluster Creation Guide. Additionally, you can refer to our EKS Deployment Guide for step-by-step instructions tailored to your specific requirements.
      • The domain name is available and associated with the ingress object in the cluster.
      • Cluster administrator access.
      • The Tekton resources are deployed.
      • Access to the cluster via Service Account token is available.

      Deploy EPAM Delivery Platform⚓︎

      To deploy the platform, follow the steps below:

      1. To apply the Tekton stack, deploy the Tekton resources by executing the commands below:

         kubectl create ns tekton-pipelines
          kubectl create ns tekton-chains
          kubectl create ns tekton-pipelines-resolvers
          kubectl apply --filename https://storage.googleapis.com/tekton-releases/triggers/latest/release.yaml
        ...
         EOF
         
      2. (Optional) To get access to EDP Portal, run the port-forwarding command:

         kubectl port-forward service/edp-headlamp 59480:80 -n edp
         
      3. (Optional) To open the EDP Portal, navigate to http://localhost:59480.

      4. (Optional) To get the admin token used to sign in to the EDP Portal:

        kubectl get secrets -o jsonpath="{.items[?(@.metadata.annotations['kubernetes\.io/service-account\.name']=='edp-admin')].data.token}" -n edp|base64 --decode

      As a result, you will get access to the EPAM Delivery Platform components via the EDP Portal UI. Navigate to our Use Cases to try out EDP functionality. Visit other subsections of the Operator Guide to learn how to configure EDP and integrate it with various tools.


    Integrate Capsule⚓︎

    This article outlines how the EPAM Delivery Platform (EDP) leverages Capsule capabilities to enable isolation for both the core platform components and the business applications workload.

    EPAM Delivery Platform uses Capsule to ensure resource isolation. It is crucial to define constraints through the Capsule tenant approach. This approach serves two primary objectives: limiting the resources allocated to the EDP components and regulating the resources utilized by each deployed environment.

    To ensure isolation for the core platform components, create the edp namespace under the Capsule tenant. Use the template provided in this instruction to create the Capsule tenant specifically for the core components.

    Integration⚓︎

    The following scheme outlines the general steps involved in configuring Capsule for seamless integration with EDP. This process ensures efficient resource isolation, allowing for the deployment and management of both EDP core platform components and business application workloads. Follow the sequential flow of the scheme to successfully integrate Capsule with the EPAM Delivery Platform:

    graph LR;
         A(Capsule Installation) --> B(Tenant Configuration) --> C(Impersonation) --> D(Create EDP Namespace)--> E(Deploy EDP)
    1. Capsule Installation - This initial step involves setting up Capsule in your environment.

    2. Tenant Configuration - Once Capsule is installed, the next critical step is configuring the Capsule tenant. This involves defining specific parameters and constraints to regulate the allocation of resources to EDP components (a sample tenant manifest is sketched after this list).

    3. Impersonation - Impersonation plays a role in managing user identities and permissions within the Capsule environment. This step ensures secure and controlled access to resources.

    4. Create EDP Namespace - The creation of a dedicated namespace under Capsule is crucial for isolating and managing the core components of EDP. This step establishes the environment where EDP will be deployed.

    5. Deploy EDP - The final step involves deploying the EPAM Delivery Platform within the configured Capsule environment.
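    For reference, a Capsule tenant for the core components might resemble the sketch below (the tenant name and owner are illustrative; verify the apiVersion against your Capsule release):

      apiVersion: capsule.clastix.io/v1beta2
      kind: Tenant
      metadata:
        name: edp-core                 # tenant that will own the edp namespace
      spec:
        owners:
          - name: <tenant-owner>       # placeholder: the user who manages the tenant
            kind: User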

    Installation⚓︎

    The installation procedure consists of two steps:

    1. Define the namespace where EDP will be installed in Capsule values:

      values.yaml
       ...
          state: Active
       
    2. Enable Capsule in the values.yaml file to make it possible to manage deployable environments:

      values.yaml
      cd-pipeline-operator:
          tenancyEngine: "capsule"

      As a result of following these instructions, Capsule policies are used to manage the EDP core components and deployable environments. By adhering to these guidelines, you should successfully install EDP with Capsule enabled as the tenancyEngine.

      EKS OIDC With Keycloak⚓︎

      This article provides instructions for configuring Keycloak as an OIDC Identity Provider for EKS. The examples are written in Terraform (HCL).

      Prerequisites⚓︎

      To follow the instruction, check the following prerequisites:

      1. terraform 0.14.10
      2. hashicorp/aws = 4.8.0
      3. mrparkers/keycloak >= 3.0.0
      4. hashicorp/kubernetes ~> 2.9.0
      5. kubectl = 1.22
      6. kubelogin >= v1.25.1
      7. Ensure that Keycloak is reachable from AWS (that is, not in a private network).

      Note

      To connect OIDC with a cluster, install and configure the kubelogin plugin. On Windows, it is recommended to download kubelogin as a binary and add it to your PATH.
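      For orientation, kubelogin is wired into the kubeconfig as an exec plugin; a sketch of the user entry (the issuer URL and client secret are placeholders):

        users:
          - name: oidc
            user:
              exec:
                apiVersion: client.authentication.k8s.io/v1beta1
                command: kubectl
                args:
                  - oidc-login
                  - get-token
                  - --oidc-issuer-url=https://<keycloak_url>/auth/realms/<realm>
                  - --oidc-client-id=kubernetes          # matches the client created below
                  - --oidc-client-secret=<secret>        # placeholder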

      Solution Overview⚓︎

      The solution includes three types of resources: AWS (EKS), Keycloak, and Kubernetes. The Keycloak resources shown on the left remain unchanged after creation, which allows associating a claim with a user's group membership. The other resources can be created, deleted, or changed as needed. The most crucial Kubernetes permission objects are RoleBindings and ClusterRoles/Roles: Roles define a set of permissions, while RoleBindings map a Kubernetes Role to the corresponding Keycloak group, so that a group member gets exactly the appropriate permissions.

      EKS Keycloak OIDC
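      For example, a RoleBinding that grants a Keycloak group read-only access in a namespace could look like this sketch (the group and namespace names are illustrative):

        apiVersion: rbac.authorization.k8s.io/v1
        kind: RoleBinding
        metadata:
          name: developers-view
          namespace: <namespace_name>        # illustrative namespace
        roleRef:
          apiGroup: rbac.authorization.k8s.io
          kind: ClusterRole
          name: view                         # built-in read-only ClusterRole
        subjects:
          - apiGroup: rbac.authorization.k8s.io
            kind: Group
            name: developers                 # Keycloak group passed in the OIDC groups claim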

      Keycloak Configuration⚓︎

      To configure Keycloak, follow the steps described below.

      • Create a client:
      resource "keycloak_openid_client" "openid_client" {
         realm_id                                  = "openshift"
         client_id                                 = "kubernetes"
         access_type                               = "CONFIDENTIAL"
       ...
       User "https://<keycloak_url>/auth/realms/<realm>#<keycloak_user_id>"
       cannot list resource "ingresses" in API group "networking.k8s.io" in the namespace "<namespace_name>"
       

      Session Update⚓︎

      To update the session, clear the cache. The default location of the login cache:

      rm -rf ~/.kube/cache
      Access Cluster via Lens⚓︎

      To access the Kubernetes cluster via Lens, follow the steps below to configure it:

      • Add a new kubeconfig to a location where Lens has access. The default kubeconfig location is ~/.kube/config, but it can be changed by navigating to File -> Preferences -> Kubernetes -> Kubeconfig Syncs;
      • (Optional) On Windows, it is recommended to reboot the system after adding a new kubeconfig;
      • Authenticate on the Keycloak login page to be able to access the cluster.

      Note

      Lens does not add the project's namespaces automatically, so add them manually: go to Settings -> Namespaces and add the namespaces of the project.

        Change Container Registry⚓︎

        In dynamic projects, changes to the container registry may be necessary. This section provides instructions for switching the container registry.

        Warning

        Exercise caution: removing registry settings may disrupt your CI/CD process. New components created after changing the registry, including Components and Environments, will function seamlessly. However, existing Components require additional steps, as outlined below.

        Remove Container Registry⚓︎

        To remove container registry integration from the EDP, follow the steps below:

        1. In the EDP Portal main menu, navigate to EDP -> Configuration -> Registry.

        2. Click the Reset registry button, type the confirm word and then click Confirm:

        Registry settings

        Update Registry for the Existing Components and Environments⚓︎

        The EPAM Delivery Platform uses the CodebaseImageStream custom resource to define container registry settings for the codebases. To update the registry for the existing codebases, follow the steps below:

        1. List all the existing CodebaseImageStream CR(s) and copy their <name> and <codebase name> fields:

          kubectl get codebaseimagestream -n edp
           
        2. Patch the CodebaseImageStream CR(s) using the commands for the registry you switched to:

          kubectl patch codebaseimagestream <name> -n edp --type='json' -p='[{"op": "replace", "path": "/spec/imageName", "value": "<Registry Endpoint>/<Registry Space>/<codebase name>"}]'

          kubectl patch codebaseimagestream <name> -n edp --type='json' -p='[{"op": "replace", "path": "/spec/imageName", "value": "dockerhub.io/<User>/<codebase name>"}]'

          kubectl patch codebaseimagestream <name> -n edp --type='json' -p='[{"op": "replace", "path": "/spec/imageName", "value": "<Registry Endpoint>/<Project>/<codebase name>"}]'
           

        If necessary, update the registry credentials for the existing CD pipelines by copying the regcred secret from the edp namespace to all the namespaces managed by the platform. To get the list of the namespaces, run the following command:

        kubectl get stages -n edp -o jsonpath='{range .items[*]}{.spec.namespace}{"\n"}{end}'
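        For reference, the patched resource looks roughly like the sketch below (field values are illustrative; the apiVersion is assumed from the EDP operators and can be confirmed with kubectl explain codebaseimagestream):

          apiVersion: v2.edp.epam.com/v1
          kind: CodebaseImageStream
          metadata:
            name: my-app-main                            # illustrative name
            namespace: edp
          spec:
            codebase: my-app                             # illustrative codebase name
            imageName: registry.example.com/edp/my-app   # the field patched above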
        Integrate Harbor With EDP Pipelines⚓︎

        Harbor serves as a tool for storing images and artifacts. This documentation contains instructions on how to create a project in Harbor and set up a robot account for interacting with the registry from CI pipelines.

        Overview⚓︎

        Harbor integration with Tekton enables the centralized storage of container images within the cluster, eliminating the need for external services. By leveraging Harbor as the container registry, users can manage and store their automation results and reports in one place.

        Integration Procedure⚓︎

        The integration process involves two steps:

        1. Creating a project to store application images.

        2. Creating two accounts with different permissions to push (read/write) and pull (read-only) project images.

        Create New Project⚓︎

        To create a new project, follow the steps below:

        1. Log in to the Harbor console using your credentials.
        2. Navigate to the Projects menu, click the New Project button:

          Projects menu

        3. In the New Project menu, enter a project name that matches your EDP namespace in the Project Name field. Keep the other fields at their defaults and click OK to continue:

          New Project menu

        Set Up Robot Account⚓︎

        To enable interaction between EDP and the Harbor project, set up a robot account:

        1. Navigate to your newly created project, select Robot Accounts menu and choose New Robot Account:

          Create Robot Account menu

        2. In the pop-up window, fill in the fields as follows:

          • Name - edp-push;
          • Expiration time - set a value aligned with your organization's policy;
          • Description - read/write permissions;
          • Permissions - Pull Repository and Push Repository.

          To proceed, click the ADD button:

          Robot Accounts menu

        3. In the window that appears, copy the robot account credentials or click the Export to file button to save the secret and account name locally:

          New credentials for Robot Account

        4. Provision the kaniko-docker-config secret using kubectl, the EDP Portal, or the External Secrets operator:

          Example

          The auth string can be generated by this command:

          echo -n "robot\$edp-project+edp:secret" | base64
           
            apiVersion: v1
             kind: Secret
             metadata:
          ...
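          Assembled in full, the secret might look like the sketch below (the registry URL and auth value are placeholders; the auth value is the base64 string produced by the echo command above):

            apiVersion: v1
            kind: Secret
            metadata:
              name: kaniko-docker-config    # name expected by the EDP build pipelines
              namespace: edp
            type: kubernetes.io/dockerconfigjson
            stringData:
              .dockerconfigjson: |
                {
                  "auths": {
                    "harbor.example.com": {
                      "auth": "<base64-auth-string>"
                    }
                  }
                }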
           
        5. (Optional) If you've already deployed the EDP Helm chart, you can update it using the following command:

          helm upgrade --install edp epamedp/edp-install \
           --values values.yaml \
           --namespace edp

        As a result, application images built in the EDP Portal will be stored in the Harbor project and deployed from the Harbor registry.

        Harbor projects can be added and retained with a retention policy generated through the EDP script in edp-cluster-add-ons.

      Customize Deployment⚓︎

      When deploying applications into environments, it's important to automate both pre-deployment and post-deployment procedures.

      Pre-deployment procedures include essential tasks such as deploying databases, configuring specific software, and preparing the environment. Additionally, post-deployment procedures, such as testing, configuring, and removing old information from the environment, are crucial for ensuring the smooth operation of the deployed application. To facilitate these processes, the custom deployment feature was implemented in KubeRocketCI.

      This page provides comprehensive guidelines on how to adjust the deployment logic to suit your needs.

      Deploy Custom Pipeline⚓︎

      Overall, the custom pipeline creation involves the following steps:

      graph LR;
           A(Create TriggerTemplate resource) --> B(Create Pipeline resource) --> C(Deploy custom environment)
      1. Create TriggerTemplate resource - On this step, we create the TriggerTemplate custom resource that will appear as an option in the environment stage creation menu.
      2. Create Pipeline - On this step, we create a custom resource called Pipeline that complements the trigger template. This resource contains all the tasks to perform within the custom pipeline.
      3. Integration - On this step, you simply select your custom pipeline logic when creating a stage for your environment.

      To customize your deployment pipeline, follow the steps below:

      1. Create the TriggerTemplate custom resource by adding the following label:
        labels:
             app.edp.epam.com/pipelinetype: deploy

      Note

      Please refer to the TriggerTemplate example for more details. Remember to set your pipeline name in the spec.resourcetemplates.spec.pipelineRef.name parameter (line #33).
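      A minimal sketch of such a TriggerTemplate is shown below (names are illustrative; the referenced example remains the source of truth):

        apiVersion: triggers.tekton.dev/v1beta1
        kind: TriggerTemplate
        metadata:
          name: my-custom-deploy                       # illustrative name
          labels:
            app.edp.epam.com/pipelinetype: deploy      # makes it appear in the stage creation menu
        spec:
          resourcetemplates:
            - apiVersion: tekton.dev/v1beta1
              kind: PipelineRun
              metadata:
                generateName: my-custom-deploy-
              spec:
                pipelineRef:
                  name: my-custom-pipeline             # set your custom Pipeline name here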

      2. Create the custom pipeline with your custom logic. Refer to the custom pipeline example for more details.

      3. Apply the created manifest files in the edp namespace.

      4. In the Create stage window of the KubeRocketCI portal, select the added trigger template in the corresponding window:

        Select trigger template

      5. (Optional) If you need to implement custom deployment in a remote cluster, do the following:

        • Connect the KubeRocketCI platform with the remote cluster if it is not integrated yet. Please refer to the Add Cluster page for more details;
        • Mount the secret to the run-quality-gate resource by changing the volumes and volumeMounts sections;
        • Switch the context by specifying the appropriate kube config file of the run-quality-gate resource;
        • In the Create stage window of the KubeRocketCI portal, select the appropriate cluster in the corresponding window:

          Select cluster

      Related Articles⚓︎

        Uninstall EDP⚓︎

        This tutorial provides detailed instructions on the optimal method to uninstall the EPAM Delivery Platform.

        Deletion Procedure⚓︎

        To uninstall EDP, perform the following steps:

        1. It is highly recommended to delete all the resources created via the EDP Portal UI first. These can be:

          • Applications;
          • Libraries;
          • Autotests;
          • Infrastructures;
          • CD Pipelines.

          We recommend deleting them via the EDP Portal UI, although it is also possible to delete all the EDP Portal resources using the kubectl delete command (a sketch is shown after these steps).

        2. Delete the application namespaces. They are named according to the edp-<cd-pipeline>-<stage-name> pattern.

        3. Uninstall EDP the same way it was installed.

        4. Run the script that deletes the rest of the custom resources:

          View: CleanEDP.sh
          #!/bin/sh
           
           ###################################################################
           # A POSIX script to remove EDP Kubernetes Custom Resources        #
          ...
           done
           
           main_func

          The script will prompt the user to specify the namespace where EDP was deployed and to choose whether the namespace should be deleted. The script deletes the EDP custom resources in the specified namespace.

        5. In Keycloak, delete the edp-main realm, and also delete the client named according to the edp-main pattern in the openshift realm.
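        As mentioned in step 1, the EDP Portal resources can also be removed with kubectl. A rough sketch (the resource kinds are assumed from the EDP operators; verify them with kubectl api-resources):

          # Delete CD pipelines and their stages, then codebases and their branches (illustrative)
          kubectl delete cdpipelines,stages --all -n edp
          kubectl delete codebases,codebasebranches --all -n edp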

        Install DependencyTrack⚓︎

        This documentation guide provides comprehensive instructions for installing and integrating DependencyTrack with the EPAM Delivery Platform.

        Prerequisites⚓︎

        • Kubectl version 1.26.0 is installed.
        • Helm version 3.12.0+ is installed.

        Installation⚓︎

        To install DependencyTrack, use the EDP add-ons approach.

        Configuration⚓︎

        1. Open Administration -> Access Management -> Teams. Click Create Team -> Automation and click Create.

        2. Click + in Permissions and add:

          BOM_UPLOAD
           PROJECT_CREATION_UPLOAD
           VIEW_PORTFOLIO
           
        3. Click + in API keys to create a token:

        DependencyTrack settings

        4. Provision secrets using a manifest, the EDP Portal, or the External Secrets operator:
        apiVersion: v1
        ...
           "token": "XXXXXXXXXXXX",
           "url": "https://dependency-track.example.com"
         }
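        With the token in place, a CI step can upload a BOM through the DependencyTrack REST API. A rough sketch (the URL, token, and project values are placeholders):

          curl -X POST "https://dependency-track.example.com/api/v1/bom" \
            -H "X-Api-Key: XXXXXXXXXXXX" \
            -F "autoCreate=true" \
            -F "projectName=my-app" \
            -F "projectVersion=1.0.0" \
            -F "bom=@bom.xml"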

        Go to the EDP Platform UI, open EDP -> Configuration -> DependencyTrack, and see the Managed by External Secret label.

        DependencyTrack managed by external secret operator

        More details on the External Secrets Operator integration can be found on the following page.

        After following the instructions provided, you should be able to integrate your DependencyTrack with the EPAM Delivery Platform.

      Deploy AWS EKS Cluster⚓︎

      This instruction offers a comprehensive guide on deploying an Amazon Elastic Kubernetes Service (EKS) cluster, ensuring a scalable and secure Kubernetes environment on AWS. For those looking to optimize their EKS cluster configurations, it is highly recommended to consult the AWS EKS Best Practices guide. This resource covers a wide range of topics crucial for the successful deployment and operation of your EKS clusters, including:

      • Security: Best practices for securing your EKS clusters, including IAM roles, network policies, and secrets management.
      • Networking: Guidance on setting up VPCs, subnets, and load balancers to ensure efficient and secure network traffic.
      • Monitoring and Logging: Strategies for implementing comprehensive monitoring and logging solutions using AWS CloudWatch and other tools to maintain visibility into cluster performance and operational health.
      • Performance: Tips for optimizing cluster performance through the proper selection of EC2 instances, efficient load balancing, and autoscaling configurations.
      • Cost Optimization: Techniques for managing and reducing costs associated with running EKS clusters, including instance selection and resource allocation strategies.

      By adhering to these best practices, developers and system administrators can ensure that their AWS EKS clusters are robust, secure, and cost-effective, facilitating a smooth and efficient CI/CD pipeline for software development.

      Prerequisites⚓︎

      Note

      Our approach to deploying the AWS EKS Cluster is based on the widely-used terraform-aws-eks module from the Terraform AWS Modules community. This module facilitates the creation of AWS Elastic Kubernetes Service (EKS) resources with best practices in mind. We encourage users to review the module's documentation to fully understand its capabilities and how it aligns with the requirements of your specific deployment scenario.
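      As a minimal illustration of that module (the version and inputs are placeholders, not a tested configuration):

        module "eks" {
          source  = "terraform-aws-modules/eks/aws"
          version = "~> 19.0"                 # placeholder version

          cluster_name    = "edp-eks"         # illustrative name
          cluster_version = "1.26"            # placeholder Kubernetes version
          vpc_id          = var.vpc_id
          subnet_ids      = var.subnet_ids
        }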

      Before the EKS cluster deployment and configuration, make sure to check the prerequisites. Install the required tools listed below:

      To check the correct tools installation, run the following commands:

      git --version
       terraform version
       aws --version
       tfenv --version
       ...
       
       argocd_agent_role_iam_role_arn = "arn:aws:iam::012345678910:role/EDPArgoCDClusterAdmin"
       argocd_irsa_iam_role_arn = ""
      Deploy OKD 4.10 Cluster⚓︎

      This instruction provides detailed information on the OKD 4.10 cluster deployment in the AWS Cloud and contains the additional setup necessary for the managed infrastructure.

      A full description of the cluster deployment can be found in the official documentation.

      Prerequisites⚓︎

      Before the OKD cluster deployment and configuration, make sure to check the prerequisites.

      Required Tools⚓︎

      1. Install the tools listed below:

      2. Create the AWS IAM user with the required permissions. Make sure the AWS account is active, and the user doesn't have a permission boundary. Remove any Service Control Policy (SCP) restrictions from the AWS account.

      3. Generate a key pair for cluster node SSH access. Please perform the steps below:
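      For instance, a key pair without a passphrase can be generated as follows (the file path is a suggestion):

        ssh-keygen -t ed25519 -N '' -f ~/.ssh/okd_id_ed25519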


      Deploy OKD 4.9 Cluster⚓︎

      This instruction provides detailed information on the OKD 4.9 cluster deployment in the AWS Cloud and contains the additional setup necessary for the managed infrastructure.

      A full description of the cluster deployment can be found in the official documentation.

      Prerequisites⚓︎

      Before the OKD cluster deployment and configuration, make sure to check the prerequisites.

      Required Tools⚓︎

      1. Install the tools listed below:

      2. Create the AWS IAM user with the required permissions. Make sure the AWS account is active, and the user doesn't have a permission boundary. Remove any Service Control Policy (SCP) restrictions from the AWS account.

      3. Generate a key pair for cluster node SSH access. Please perform the steps below:

        Install Amazon EBS CSI Driver⚓︎

        The Amazon Elastic Block Store (Amazon EBS) Container Storage Interface (CSI) driver allows Amazon Elastic Kubernetes Service (Amazon EKS) clusters to manage the lifecycle of Amazon EBS volumes for Kubernetes Persistent Volumes.

        Prerequisites⚓︎

        An existing AWS Identity and Access Management (IAM) OpenID Connect (OIDC) provider for your cluster is required. To determine whether you already have an OIDC provider or to create a new one, see Creating an IAM OIDC provider for your cluster.
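        One way to check whether the cluster already has an OIDC issuer (the cluster name is a placeholder):

          aws eks describe-cluster --name <cluster-name> \
            --query "cluster.identity.oidc.issuer" --output text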

        To add an Amazon EBS CSI add-on, please follow the steps below:

        1. Check your cluster details (the random value in the cluster name will be required in the next step):

          kubectl cluster-info
           
        2. Create Kubernetes IAM Trust Policy for Amazon EBS CSI Driver. Replace AWS_ACCOUNT_ID with your account ID, AWS_REGION with your AWS Region, and EXAMPLED539D4633E53DE1B71EXAMPLE with the value that was returned in the previous step. Save this Trust Policy into a file aws-ebs-csi-driver-trust-policy.json.

          aws-ebs-csi-driver-trust-policy.json
            {
               "Version": "2012-10-17",
               "Statement": [
          ...
             --service-account-role-arn arn:aws:iam::AWS_ACCOUNT_ID:role/AmazonEKS_EBS_CSI_DriverRole
           

          Note

          When the plugin is deployed, it creates the ebs-csi-controller-sa service account. The service account is bound to a Kubernetes ClusterRole with the required Kubernetes permissions. The ebs-csi-controller-sa service account should already be annotated with arn:aws:iam::AWS_ACCOUNT_ID:role/AmazonEKS_EBS_CSI_DriverRole. To check the annotation, please run:

          kubectl get sa ebs-csi-controller-sa -n kube-system -o=jsonpath='{.metadata.annotations}'
           

          In case pods have errors, restart the ebs-csi-controller deployment:

          kubectl rollout restart deployment ebs-csi-controller -n kube-system
          -
        \ No newline at end of file +
      \ No newline at end of file diff --git a/operator-guide/edp-access-model/index.html b/operator-guide/edp-access-model/index.html index d96da00e1..cd3286c8f 100644 --- a/operator-guide/edp-access-model/index.html +++ b/operator-guide/edp-access-model/index.html @@ -1,4 +1,4 @@ - KubeRocketCI Access Model - EPAM Delivery Platform

      KubeRocketCI Access Model⚓︎

      In KubeRocketCI, access control is implemented via authorisation methods. The regulation of both user and group permissions is facilitated through Keycloak, which in turn integrates with RBAC. Permissions for third-party tools are controlled using custom resources. This document describes the access management entities, including Kubernetes groups, custom resources, Keycloak realm roles, detailing their respective permissions and the tools they are applied to.

      Keycloak⚓︎

      This section explains what realm roles and realm groups are and how they function within Keycloak.

      Realm Roles⚓︎

      The Keycloak realm of edp has two realm roles with a composite types named administrator and developer:

      • The administrator realm role is designed for users who need administrative access to the tools used in the project. This realm role contains the sonar-administrators role. Users who are assigned the administrator realm role will be granted these two roles automatically.
      • The developer realm role, on the other hand, is designed for users who need access to the development tools used in the project. This realm role also contains the sonar-developers role. Users who are assigned the developer realm role will be granted these two roles automatically.

      These realm roles have been defined to make it easier to assign groups of rights to users.

      The table below shows the realm roles and the composite types they relate to.

      Realm Role Name        Regular Role    Composite Role
      administrator                          ✓
      developer                              ✓
      sonar-administrators   ✓
      sonar-developers       ✓

      Realm Groups⚓︎

      KubeRocketCI uses two different realms for group management, edp and broker:

      • The edp realm contains two groups that are specifically used for controlling access to Argo CD. These groups are named ArgoCDAdmins and ArgoCD-edp-users.
      • The broker realm contains five groups that are used for access control in both the KubeRocketCI portal and the EKS cluster. These groups are named edp-oidc-admins, edp-oidc-builders, edp-oidc-deployers, edp-oidc-developers and edp-oidc-viewers.

      Realm Group Name      Realm Name
      ArgoCDAdmins          edp
      ArgoCD-edp-users      edp
      edp-oidc-admins       broker
      edp-oidc-builders     broker
      edp-oidc-deployers    broker
      edp-oidc-developers   broker
      edp-oidc-viewers      broker

      SonarQube⚓︎

      In the case of SonarQube, there are two ways to manage access: via Keycloak and via the KubeRocketCI approach. This section describes both approaches.

      Manage Access via Keycloak⚓︎

      SonarQube access is managed using Keycloak roles in the edp realm. The sonar-developers and sonar-administrators realm roles are the two available roles that determine user access levels. To grant access, the corresponding role must be added to the user in Keycloak.

      For example, a user who needs developer access to SonarQube should be assigned the sonar-developers or developer composite role in Keycloak.

      KubeRocketCI Approach for Managing Access⚓︎

      KubeRocketCI provides its own SonarQube Permission Template, which is used to manage user access and permissions for SonarQube projects.

      The template is stored in the custom SonarQube resource of the operator; an example of the custom resource can be found below.

      SonarPermissionTemplate

      apiVersion: v2.edp.epam.com/v1
       kind: SonarPermissionTemplate
       metadata:
         name: edp-default
      ...
       spec:
         accountId: user@user.com
         groupId: Administrators
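
      The elided fragment above ends with the spec of a GerritGroupMember resource. For reference, a complete manifest consistent with the visible fields might look as follows (a sketch; the metadata values are assumptions):

       apiVersion: v2.edp.epam.com/v1
       kind: GerritGroupMember
       metadata:
         name: admins-user        # hypothetical name
         namespace: edp
       spec:
         accountId: user@user.com
         groupId: Administrators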

      After the GerritGroupMember resource is created, the user will have the permissions and access levels associated with that group.

      KubeRocketCI Portal and EKS Cluster⚓︎

      Both Portal and EKS Cluster use Keycloak groups for controlling access. Users need to be added to the required group in Keycloak to get access. The groups that are used for access control are in the broker realm.

      Note

      The broker realm is used because a Keycloak client for OIDC is in this realm.

      Keycloak Groups⚓︎

      There are two types of groups provided for users:

      • Independent group: provides the minimum required permission set.
      • Extension group: extends the rights of an independent group.

      For example, the edp-oidc-viewers group can be extended with rights from the edp-oidc-builders group.

      Group Name            Independent Group   Extension Group
      edp-oidc-admins       ✓
      edp-oidc-developers   ✓
      edp-oidc-viewers      ✓
      edp-oidc-builders                         ✓
      edp-oidc-deployers                        ✓

      Name     Action List
      View     Getting of all namespaced resources
      Build    Starting a PipelineRun from the KubeRocketCI portal
      Deploy   Deploying a new version of an application via an Argo CD Application

      Group Name            View   Build   Deploy   Full Namespace Access
      edp-oidc-admins       ✓      ✓       ✓        ✓
      edp-oidc-developers   ✓      ✓       ✓
      edp-oidc-viewers      ✓
      edp-oidc-builders            ✓
      edp-oidc-deployers                   ✓

      Note

      Originally, members of the edp-oidc-developers group come solely with the permission to initiate pipelines. Assigning them to the edp-oidc-viewers group additionally grants the permissions needed to view pipelines in the KubeRocketCI portal.

      Cluster RBAC Resources⚓︎

      The edp namespace has five role bindings that provide the necessary permissions for the Keycloak groups described above.

      Role Binding Name    Role Name          Groups
      tenant-admin         cluster-admin      edp-oidc-admins
      tenant-builder       tenant-builder     edp-oidc-builders
      tenant-deployer      tenant-deployer    edp-oidc-deployers
      tenant-developer     tenant-developer   edp-oidc-developers
      tenant-viewer        view               edp-oidc-viewers, edp-oidc-developers
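
      As an illustration, the tenant-viewer binding from the table could be expressed as the following manifest (a sketch; only the role and group names come from the table above):

       apiVersion: rbac.authorization.k8s.io/v1
       kind: RoleBinding
       metadata:
         name: tenant-viewer
         namespace: edp
       roleRef:
         apiGroup: rbac.authorization.k8s.io
         kind: ClusterRole
         name: view
       subjects:
         - apiGroup: rbac.authorization.k8s.io
           kind: Group
           name: edp-oidc-viewers
         - apiGroup: rbac.authorization.k8s.io
           kind: Group
           name: edp-oidc-developers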

      Note

      KubeRocketCI provides an aggregated ClusterRole named edp-aggregate-view-edp with permissions to view custom KubeRocketCI resources.

      Info

      The tenant-admin RoleBinding is created by cd-pipeline-operator in every namespace it provisions. This RoleBinding assigns the admin role to the edp-oidc-admins and edp-oidc-developers groups.

      Grant User Access to the Created Namespaces⚓︎

      To provide users with admin or developer privileges for project namespaces, they need to be added to the edp-oidc-admins and edp-oidc-developers groups in Keycloak.

      Argo CD⚓︎

      In Argo CD, groups are specified when creating an AppProject to restrict access to deployed applications. To gain access to deployed applications within a project, the user must be added to their corresponding Argo CD group in Keycloak. This ensures that only authorized users can access and modify applications within the project.

      Info

      By default, only the ArgoCDAdmins group is automatically created in Keycloak.


      EDP Kiosk Usage⚓︎

      Explore the way Kiosk, a multi-tenancy extension for Kubernetes, is used in EDP.

      Prerequisites⚓︎

      • Installed Kiosk 0.2.11.

      Diagram of Kiosk Usage in EDP⚓︎

      Kiosk usage

      Legend:

      • blue - created by Helm chart;
      • grey - created manually.

      Usage⚓︎

      • The EDP installation area on the diagram is described at the following link;
      • Once the above step is executed, the edp-cd-pipeline-operator service account is bound to the kiosk-edit ClusterRole, which allows it to manage Kiosk-specific resources (e.g., Space);
      • Each newly created stage in the edp installation of EDP generates a new Kiosk Space resource linked to the edp Kiosk Account (see the sketch below);
      • According to the Kiosk documentation, the Space resource creates a namespace with a RoleBinding that links the service account associated with the Kiosk Account to the kiosk-space-admin ClusterRole. As the cd-pipeline-operator ServiceAccount is linked to the Account, it has admin permissions in all namespaces it generates.
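
      A minimal Space manifest linked to the edp Account might look like this (a sketch based on the public Kiosk API; the metadata name is hypothetical):

       apiVersion: tenancy.kiosk.sh/v1alpha1
       kind: Space
       metadata:
         name: edp-dev        # hypothetical stage namespace name
       spec:
         account: edp         # the Kiosk Account mentioned above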

      EKS OIDC Integration⚓︎

      This page is a detailed guide on integrating Keycloak with the edp-keycloak-operator to serve as an identity provider for AWS Elastic Kubernetes Service (EKS). It provides step-by-step instructions for creating necessary realms, users, roles, and client configurations for a seamless Keycloak-EKS collaboration. Additionally, it includes guidelines on installing the edp-keycloak-operator using Helm charts.

      Prerequisites⚓︎

      Configure Keycloak⚓︎

      To prepare Keycloak for integration with the edp-keycloak-operator, follow the steps below:

      1. Ensure that the openshift realm is created.

      2. Create the orchestrator user and set the password in the Master realm.

      3. In the Role Mapping tab, assign the proper roles to the user:

        • Realm Roles:

          • create-realm;
          • offline_access;
          • uma_authorization.
        • Client Roles (openshift-realm):

          • impersonation;
          • manage-authorization;
          • manage-clients;
          • manage-users.

      Role mappings

      Install Keycloak Operator⚓︎

      To install the Keycloak operator, follow the steps below:

      1. Add the epamedp Helm chart to a local client:

        helm repo add epamedp https://epam.github.io/edp-helm-charts/stable
           helm repo update
           
        2. Install the Keycloak operator:

          helm install keycloak-operator epamedp/keycloak-operator --namespace security --set name=keycloak-operator
           

        Connect Keycloak Operator to Keycloak⚓︎

        The next stage after installing Keycloak is to integrate it with the Keycloak operator. It can be implemented with the following steps:

        1. Create the keycloak secret that will contain username and password to perform the integration. Set your own password. The username must be orchestrator:

          kubectl -n security create secret generic keycloak \
          ...
               - UPDATE_PASSWORD
             groups:
               - eks-oidc-developers
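
          The full secret-creation command follows the usual kubectl pattern (a sketch; choose your own password):

            kubectl -n security create secret generic keycloak \
              --from-literal=username=orchestrator \
              --from-literal=password=<your_password>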
      As a result, Keycloak is integrated with the AWS Elastic Kubernetes Service. This integration enables users to log in to the EKS cluster effortlessly using their kubeconfig files while managing permissions through Keycloak.


      Associate IAM Roles With Service Accounts⚓︎

      This page describes how to associate an IAM role with a service account (IRSA) in the EPAM Delivery Platform.

      Get acquainted with the AWS Official Documentation on the subject before proceeding.

      Common Configuration of IAM Roles With Service Accounts⚓︎

      To successfully associate the IAM role with the service account, follow the steps below:

      1. Create an IAM role that will further be associated with the service account. This role must have the following trust policy:

        IAM Role

        {
             "Version": "2012-10-17",
             "Statement": [
               {
          ...
             "Account": "XXXXXXXXXXXX",
             "Arn": "arn:aws:sts::XXXXXXXXXXXX:assumed-role/AWSIRSATestRole/botocore-session-XXXXXXXXXX"
             }

          As a result, it is possible to perform actions in AWS under the AWSIRSATestRole role.
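
          The association itself is done by annotating the target service account with the role ARN, along the lines of the following sketch (the service account and namespace names are placeholders):

            kubectl annotate serviceaccount <service_account_name> \
              -n <namespace> \
              eks.amazonaws.com/role-arn=arn:aws:iam::XXXXXXXXXXXX:role/AWSIRSATestRole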


      External Secrets Operator Integration⚓︎

      External Secrets Operator (ESO) can be integrated with EDP.

      There are multiple Secrets Providers that can be used within ESO. EDP is integrated with two major providers:

      • Kubernetes
      • AWS Systems Manager Parameter Store

      EDP uses a number of secrets to integrate the applications it works with. Below is a list of the secrets used in the EPAM Delivery Platform and their descriptions. All the secrets are encoded in Base64 format.

      Secret Name                      Fields                        Description                                         Used by
      keycloak                         username, password            Username and password with specific rights for      keycloak-operator
                                                                     the EDP tenant in Keycloak
      ci-defectdojo                    token, url                    DefectDojo token and URL                            edp-tekton
      kaniko-docker-config             .dockerconfigjson             Serialized JSON that follows docker config          edp-tekton
                                                                     patterns
      regcred                          .dockerconfigjson             Serialized JSON that follows docker config          cd-pipeline-operator
                                                                     patterns
      ci-github                        id_rsa, token, secretString   Private key from the GitHub repo, API token,        edp-tekton
                                                                     random string
      ci-gitlab                        id_rsa, token, secretString   Private key from the GitLab repo, API token,        edp-tekton
                                                                     random string
      ci-jira                          username, password            Jira username and password                          edp-codebase-operator
      ci-sonarqube                     token, url                    SonarQube token and URL                             edp-tekton
      ci-nexus                         username, password, url       Nexus username, password, and URL                   edp-tekton
      ci-dependency-track              token, url                    Dependency-Track token and URL                      edp-tekton
      oauth2-proxy-cookie-secret       cookie-secret                 Secret key for oauth2-proxy                         edp-install
      keycloak-client-headlamp-secret  clientSecret                  Secret key for the Keycloak client                  keycloak-operator
      ci-argocd                        token, url                    Argo CD token and URL                               edp-tekton

      EDP Core Secrets⚓︎

      The list below represents the baseline required for full operation within EDP:

      • kaniko-docker-config: Used for pushing docker images to a specific registry.
      • ci-sonarqube: Used in the CI process for SonarQube integration.
      • ci-nexus: Used for pushing artifacts to the Nexus storage.

      These secrets are mandatory for Tekton pipelines to work properly.
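
      For instance, creating one of these secrets by hand follows the usual kubectl pattern (a sketch; the credentials and URL are placeholders):

        kubectl -n edp create secret generic ci-nexus \
          --from-literal=username=<nexus_user> \
          --from-literal=password=<nexus_password> \
          --from-literal=url=<nexus_url>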

      Kubernetes Provider⚓︎

      All secrets are stored in Kubernetes in pre-defined namespaces. EDP suggests using the following approach for secrets management:

      • EDP_NAMESPACE-vault, where EDP_NAMESPACE is the name of the namespace where EDP is deployed, for example, edp-vault. This namespace is used by the EDP platform. Access to secrets in edp-vault is permitted only for EDP administrators.
      • EDP_NAMESPACE-cicd-vault, where EDP_NAMESPACE is the name of the namespace where EDP is deployed, for example, edp-cicd-vault. The development team uses the secrets in edp-cicd-vault for microservices development.

      See a diagram below for more details:

      eso-with-kubernetes

      To install EDP, a set of passwords must be created first. When using ESO, these secrets are provided automatically.

      1. Create a common namespace for secrets and EDP:

        kubectl create namespace edp-vault
           kubectl create namespace edp
           
        2. Create secrets in the edp-vault namespace:

          apiVersion: v1
           kind: Secret
          ...
           --values values.yaml \
           --namespace edp \
           --atomic
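
        With the edp-vault namespace populated, ESO can mirror each secret into the edp namespace. A sketch of an ExternalSecret for the keycloak secret, assuming a Kubernetes-provider SecretStore named edp-vault already exists:

          apiVersion: external-secrets.io/v1beta1
          kind: ExternalSecret
          metadata:
            name: keycloak
            namespace: edp
          spec:
            refreshInterval: 1h
            secretStoreRef:
              name: edp-vault        # assumed SecretStore name
              kind: SecretStore
            target:
              name: keycloak
            data:
              - secretKey: username
                remoteRef:
                  key: keycloak
                  property: username
              - secretKey: password
                remoteRef:
                  key: keycloak
                  property: password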

      Debug GitHub Webhooks in Jenkins⚓︎

      A webhook enables third-party services like GitHub to send real-time updates to an application. Updates are triggered by an event or an action by the webhook provider (for example, a push to a repository or a Pull Request creation) and are pushed via HTTP requests to the application, in this case Jenkins.
      The GitHub Jenkins job provisioner creates a webhook in the GitHub repository during the Create release pipeline once the Integrate GitHub/GitLab in Jenkins is enabled and the GitHub Webhook Configuration is completed.

      The Jenkins setup in EDP uses the following plugins responsible for listening on GitHub webhooks:

      • GitHub plugin is configured to listen on Push events.

      In case of any issues with webhooks, try the following solutions:

      1. Check that the firewalls are configured to accept the incoming traffic from the IP address range that is described in the GitHub documentation.

      2. Check that GitHub Personal Access Token is correct and has sufficient scope permissions.

      3. Check that the job has run at least once before using the hook (once an application is created in EDP, the build job should be run automatically in Jenkins).

      4. Check that webhooks for both the Push and issue comment events and the Pull Request events are created on the GitHub side (unlike GitLab, GitHub does not need separate webhooks for each branch):

        • Go to the GitHub repository -> Settings -> Webhooks.

        Webhooks settings

      5. Click each webhook and check if the event delivery is successful:

        • The URL payload must be https://jenkins-the-host.com/github-webhook/ for the GitHub plugin and https://jenkins-the-host.com/ghprbhook/ for the GitHub Pull Request Builder.
        • The content type must be application/json for Push events and application/x-www-form-urlencoded for Pull Request events.
        • The html_url in the Payload request must match the repository URL and be without .git at the end of the URL.
      6. Check that the X-Hub-Signature secret is verified. It is provided by the Jenkins GitHub plugin for Push events and by the GitHub Pull Request Builder plugin for Pull Request events. The Secret field is optional. Nevertheless, if incorrect, it can prevent webhook events.

        For the GitHub plugin (Push events):

        • Go to Jenkins -> Manage Jenkins -> Configure System, and find the GitHub plugin section.
        • Select Advanced -> Shared secrets to add the secret via the Jenkins Credentials Provider.

        For the GitHub Pull Request Builder (Pull Request events):

        • Go to Jenkins -> Manage Jenkins -> Configure System, and find the GitHub Pull Request Builder plugin section.
        • Check Shared secret that can be added manually.
      7. Redeliver events by clicking the Redeliver button and check the Response body.

        Manage webhook

        Note

        Use Postman to debug webhooks.
        Add all headers to Postman from the webhook Request -> Headers field and send the payload (Request body) using the appropriate content type.
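
        The same replay can also be scripted, for example with curl (a sketch; the host, payload file, and signature are placeholders):

          curl -X POST https://jenkins-the-host.com/github-webhook/ \
            -H 'Content-Type: application/json' \
            -H 'X-GitHub-Event: push' \
            -H 'X-Hub-Signature: sha1=<signature>' \
            --data @payload.json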

        Examples for Push and Pull Request events:

        GitHub plugin push events
        Postman push event payload headers

        The response in the Jenkins log:

        Jan 17, 2022 8:51:14 AM INFO org.jenkinsci.plugins.github.webhook.subscriber.PingGHEventSubscriber onEvent
           PING webhook received from repo <https://github.com/user-profile/user-repo>!
           

          GitHub pull request builder
          Postman pull request event payload headers

          The response in the Jenkins log:

          Jan 17, 2022 8:17:53 AM FINE org.jenkinsci.plugins.ghprb.GhprbRootAction
           Got payload event: ping
          ...
           node {
               git credentialsId: 'github-sshkey', url: 'https://github.com/someone/something.git', branch: 'master'
           }

          Push events may not work correctly with the Job Pipeline script from SCM option in the current version of the GitHub plugin 1.34.1.


      Debug GitLab Webhooks in Jenkins⚓︎

      A webhook enables third-party services like GitLab to send real-time updates to an application. Updates are triggered by an event or an action by the webhook provider (for example, a push to a repository or a Merge Request creation) and are pushed via HTTP requests to the application, in this case Jenkins.
      The GitLab Jenkins job provisioner creates a webhook in the GitLab repository during the Create release pipeline once the Integrate GitHub/GitLab in Jenkins is enabled and the GitLab Integration is completed.

      The Jenkins setup in EDP uses the GitLab plugin responsible for listening on GitLab webhook Push and Merge Request events.

      In case of any issues with webhooks, try the following solutions:

      1. Check that the firewalls are configured to accept incoming traffic from the IP address range that is described in the GitLab documentation.

      2. Check that GitLab Personal Access Token is correct and has the api scope. If you have used the Project Access Token, make sure that the role is Owner or Maintainer, and it has the api scope.

      3. Check that the job has run at least once before using the hook (once an application is created in EDP, the build job should be run automatically in Jenkins).

      4. Check that webhooks for both the Push Events (with Note Events) and the Merge Requests Events (with Note Events) are created on the GitLab side for each branch (unlike GitHub, GitLab must have separate webhooks for each branch).

        • Go to the GitLab repository -> Settings -> Webhooks:

        Webhooks list

      5. Click Edit next to each webhook and check if the event delivery is successful. If the webhook is sent, the Recent Deliveries list becomes available. Click View details.

        Webhooks settings

        • The URL payload must be similar to the job URL on Jenkins. For example:
          https://jenkins-server.com/project/project-name/MAIN-Build-job is for the Push events.
          https://jenkins-server.com/project/project-name/MAIN-Code-review-job is for the Merge Request events.
        • The content type must be application/json for both events.
        • The "web_url" in the Request body must match the repository URL.
        • Project "web_url", "path_with_namespace", "homepage" links must be without .git at the end of the URL.
      6. Verify the Secret token (X-Gitlab-Token). This token is provided by the Jenkins GitLab Plugin in the Jenkins job and is set by the Job Provisioner:

        • Go to the Jenkins job and select Configure.
        • Select Advanced under the Build Triggers and check the Secret token.

        Secret token is optional and can be empty. Nevertheless, if incorrect, it can prevent webhook events.

      7. Redeliver events by clicking the Resend Request button and check the Response body.

        Note

        Use Postman to debug webhooks.
        Add all headers to Postman from the webhook Request Headers field and send the payload (Request body) using the appropriate content type.

        Examples for Push and Merge Request events:

        Push request build pipeline
        Postman push request payload headers

        The response in the Jenkins log:

        Jan 17, 2022 11:26:34 AM INFO com.dabsquared.gitlabjenkins.webhook.GitLabWebHook getDynamic
           WebHook called with url: /project/project-name/MAIN-Build-job
           Jan 17, 2022 11:26:34 AM INFO com.dabsquared.gitlabjenkins.trigger.handler.AbstractWebHookTriggerHandler handle
           project-name/MAIN-Build-job triggered for push.
           

          Merge request code review pipeline
          Postman merge request payload headers

          The response in the Jenkins log:

          Jan 17, 2022 11:14:58 AM INFO com.dabsquared.gitlabjenkins.webhook.GitLabWebHook getDynamic
           WebHook called with url: /project/project-name/MAIN-Code-review-job
        8. Check that the repository pushing to Jenkins and the repository(ies) in the pipeline Job match. GitLab Connection must be defined in the job settings.

        9. Check that the settings in the Build Triggers for the Build job are as follows:

          Build triggers build pipeline

        10. Check that the settings in the Build Triggers for the Code Review job are as follows:

          Build triggers code review pipeline

        11. Filter the Jenkins log by using the Jenkins custom log recorder:

          • Go to Manage Jenkins -> System Log -> Add new log recorder.
          • The Push and Merge Request events for GitLab:

            Logger                                                                                 Log Level
            com.dabsquared.gitlabjenkins.webhook.GitLabWebHook                                     ALL
            com.dabsquared.gitlabjenkins.trigger.handler.AbstractWebHookTriggerHandler             ALL
            com.dabsquared.gitlabjenkins.trigger.handler.merge.MergeRequestHookTriggerHandlerImpl  ALL
            com.dabsquared.gitlabjenkins.util.CommitStatusUpdater                                  ALL

      Harbor OIDC Configuration⚓︎

      This page provides instructions for configuring OIDC authorization for Harbor. This enables the use of Single Sign-On (SSO) for authorization in Harbor and allows centralized control over user access and rights through a single configuration point.

      Prerequisites⚓︎

      Before you begin, ensure your cluster meets the following requirements:

      Configure Keycloak⚓︎

      To start, configure Keycloak by creating two Kubernetes resources. Follow the steps below:

      1. Generate the keycloak-client-harbor-secret for Keycloak using either the commands below or using the External Secrets Operator:

        keycloak_client_harbor_secret=$(openssl rand -base64 32 | head -c 32)
           
          kubectl -n edp create secret generic keycloak-client-harbor-secret \
               --from-literal=cookie-secret=${keycloak_client_harbor_secret}
           
        2. Create the KeycloakClient custom resource by applying the HarborKeycloakClient.yaml file in the edp namespace. This custom resource uses the keycloak-client-harbor-secret to create the harbor client in Keycloak; the client's password is the value of the Kubernetes secret from step 1:

          View: HarborKeycloakClient.yaml

           apiVersion: v1.edp.epam.com/v1
          ...
           verify_certificate: true
           oidc_auto_onboard: true
           oidc_user_claim: preferred_username
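
          For orientation, the visible fragment suggests a KeycloakClient resource of roughly the following shape (a sketch only; the spec fields are assumptions about the edp-keycloak-operator API, and the shipped HarborKeycloakClient.yaml remains the authoritative source):

           apiVersion: v1.edp.epam.com/v1
           kind: KeycloakClient
           metadata:
             name: harbor
             namespace: edp
           spec:
             clientId: harbor                          # assumed client name
             secret: keycloak-client-harbor-secret     # the secret from step 1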

          Harbor Authentication Configuration

        As a result, users will be prompted to authenticate themselves when logging in to Harbor UI.


      Headlamp OIDC Configuration⚓︎

      This page provides instructions for configuring OIDC authorization for the EDP Portal UI, thus allowing the use of SSO for authorization in the Portal and controlling user access and rights from one configuration point.

      Prerequisites⚓︎

      Ensure the following values are set first before starting the Portal OIDC configuration:

      1. realm_id = openshift

      2. client_id = kubernetes

      3. keycloak_client_key = keycloak_client_secret_key (received from: Openshift realm -> Clients -> kubernetes -> Credentials -> Client secret)

      4. group = edp-oidc-admins, edp-oidc-builders, edp-oidc-deployers, edp-oidc-developers, edp-oidc-viewers (Should be created manually in the realm from point 1)

      Note

      The values indicated above are the result of the Keycloak configuration as an OIDC identity provider. To receive them, follow the instructions on the Keycloak OIDC EKS Configuration page.

      Configure Keycloak⚓︎

      To proceed with the Keycloak configuration, perform the following:

      1. Add the URL of the Headlamp to the valid_redirect_uris variable in Keycloak:

        View: keycloak_openid_client
          valid_redirect_uris = [
               "https://edp-headlamp-edp.<dns_wildcard>/*"
               "http://localhost:8000/*"
             ]
          ...
             config:
               oidc:
                 enabled: true
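
          Expanded, the Headlamp chart values implied by this fragment might look as follows (a sketch; the issuer URL format is an assumption, and the client ID and secret come from the prerequisites above):

           config:
             oidc:
               enabled: true
               clientID: kubernetes
               clientSecret: <keycloak_client_secret_key>
               issuerURL: https://<keycloak_host>/auth/realms/openshift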
        2. Navigate to Headlamp and log in by clicking the Sign In button:

          Headlamp login page

        3. Go to EDP section -> Account -> Settings, and set up a namespace:

          Headlamp namespace settings

        As a result, it is possible to control access and rights from the Keycloak endpoint.


      Integrate GitHub/GitLab in Tekton⚓︎

      This page describes how to integrate EDP with GitLab or GitHub Version Control System.

      Integration Procedure⚓︎

      To start, add the Secret with an SSH key and API token, and the GitServer resource, by taking the steps below.

      1. Generate an SSH key pair and add a public key to GitLab or GitHub account.

        ssh-keygen -t ed25519 -C "email@example.com"
           
        2. Generate access token for GitLab or GitHub account with read/write access to the API. Both personal and project access tokens are applicable.

          To create access token in GitHub, follow the steps below:

          • Log in to GitHub.
          • Click the profile account and navigate to Settings -> Developer Settings.
          • Select Personal access tokens (classic) and generate a new token with the following parameters:

          Repo permission

          Note

          The access below is required for the GitHub Pull Request Builder plugin to get Pull Request commits, their status, and author info.

          Admin:repo permission
          Admin:org permission
          User permission

          Warning

          Make sure to save the new personal access token because it won't be displayed later.

          To create access token in GitLab, follow the steps below:

          • Log in to GitLab.
          • In the top-right corner, click the avatar and select Settings.
          • On the User Settings menu, select Access Tokens.
          • Choose a name and an optional expiry date for the token.
          • In the Scopes block, select the api scope for the token.

          Personal access tokens
          Personal access tokens

          • Click the Create personal access token button.

          Note

          Make sure to save the access token, as you will not be able to view it again.

          In case you want to create a project access token instead of a personal one, take the following steps:

          • Log in to GitLab and navigate to the project.
          • On the User Settings menu, select Access Tokens.
          • Choose a name and an optional expiry date for the token.
          • Choose a role: Owner or Maintainer.
          • In the Scopes block, select the api scope for the token.

          Project access tokens
          Project access tokens

          • Click the Create project access token button.
        3. Create a secret in the edp namespace for the Git account with the id_rsa, username, and token fields. Take the following template as an example (use ci-gitlab instead of ci-github for GitLab):

          Navigate to EDP Portal -> EDP -> Configuration -> Git Servers. Fill in the required fields:

          VCS Integration in EDP portal
          Project access tokens

          Create a manifest file called secret.yaml with the following content filled in:

          kubectl apply -f - <<EOF
          apiVersion: v1
          kind: Secret
          @@ -13,4 +13,4 @@
            username: git
            token: <your_github_access_token>
          EOF
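          The hunk above elides the secret metadata. Below is a minimal, hedged sketch of the full manifest, assuming the secret name ci-github and the stringData keys named in this step (use ci-gitlab and a GitLab token for GitLab):

          # Hedged sketch; field names follow the step above, verify against your EDP version
          apiVersion: v1
          kind: Secret
          metadata:
            name: ci-github
            namespace: edp
          type: Opaque
          stringData:
            id_rsa: |
              <private_ssh_key>
            username: git
            token: <your_github_access_token>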

        As a result, you will be able to create codebases using an integrated Version Control System.

diff --git a/operator-guide/index.html b/operator-guide/index.html


      Overview⚓︎

      The EDP Operator guide is intended for DevOps engineers and provides information on EDP installation, configuration, and customization, as well as platform support. Inspect the documentation to adjust the EPAM Delivery Platform according to your business needs:

      • The Configuration section covers the options for setting up the project, backups, Tekton, and logging.
      • The Integration section comprises the AWS, Jira, and Logsight integration options.
diff --git a/operator-guide/install-argocd/index.html b/operator-guide/install-argocd/index.html


      Install Argo CD⚓︎

      Inspect the prerequisites and the main steps to perform for enabling Argo CD in EDP.

      Prerequisites⚓︎

      The following tools must be installed:

      Installation⚓︎

      Argo CD enablement for EDP consists of two major steps:

      • Argo CD installation
      • Argo CD integration with EDP (SSO enablement, codebase onboarding, etc.)

      Argo CD can be installed in several ways; please follow the official documentation for more details. It is also possible to install Argo CD using the edp-cluster-add-ons.

      Install With Helm⚓︎

      Follow the steps below to install Argo CD using Helm:

      For the OpenShift users:

      When using the OpenShift platform, apply the SecurityContextConstraints resource. Change the namespace in the users section if required.

      allowHostDirVolumePlugin: false
       allowHostIPC: false
       allowHostNetwork: false
       allowHostPID: false
      @@ -128,4 +128,4 @@
       
    6. Run the installation:

      kubectl create ns argocd
       helm repo add argo https://argoproj.github.io/argo-helm
       helm install argo --version 5.33.1 argo/argo-cd -f values.yaml -n argocd
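      After the installation completes, you can verify the deployment and retrieve the initial admin password. This is a standard Argo CD step, not specific to EDP:

      kubectl get pods -n argocd
      # Print the auto-generated admin password for the first login
      kubectl -n argocd get secret argocd-initial-admin-secret \
        -o jsonpath="{.data.password}" | base64 -d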
diff --git a/operator-guide/install-defectdojo/index.html b/operator-guide/install-defectdojo/index.html


        Install DefectDojo⚓︎

        Inspect the main steps to perform for installing DefectDojo via Helm Chart.

        Info

        It is also possible to install DefectDojo using the EDP add-ons approach. For details, please refer to the EDP add-ons documentation.

        Prerequisites⚓︎

        • Kubectl version 1.26.0 is installed.
        • Helm version 3.12.0+ is installed.

        Installation⚓︎

        Info

        Please refer to the DefectDojo Helm Chart and Deploy DefectDojo into the Kubernetes cluster sections for details.

        To install DefectDojo, follow the steps below:

        1. Check that a security namespace is created. If not, run the following command to create it:

          kubectl create namespace defectdojo
           

          For the OpenShift users:

          When using the OpenShift platform, install the SecurityContextConstraints resource. In case of using a custom namespace for defectdojo, change the namespace in the users section.

          defectdojo-scc.yaml
          allowHostDirVolumePlugin: false
           allowHostIPC: false
           allowHostNetwork: false
          @@ -123,4 +123,4 @@
             "url": "https://defectdojo.example.com",
             "token": "XXXXXXXXXXXX"
           }
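          The hunk above elides the chart deployment itself. A minimal sketch is shown below, assuming the upstream DefectDojo chart repository and its documented flag names, which may differ between chart versions:

          helm repo add defectdojo 'https://raw.githubusercontent.com/DefectDojo/django-DefectDojo/helm-charts'
          helm repo update
          # The createSecret flags ask the chart to generate credentials for you
          helm install defectdojo defectdojo/defectdojo \
            --namespace defectdojo \
            --set createSecret=true \
            --set createRabbitMqSecret=true \
            --set createRedisSecret=true \
            --set createPostgresqlSecret=true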
          Go to EDP Portal -> EDP -> Configuration -> DefectDojo and see the Managed by External Secret message.

          More details about the External Secrets Operator integration procedure can be found in the External Secrets Operator Integration page.

        After following the instructions provided, you should be able to integrate your DefectDojo with the EPAM Delivery Platform using one of the available scenarios.

diff --git a/operator-guide/install-edp/index.html b/operator-guide/install-edp/index.html


        Install KubeRocketCI⚓︎

        Inspect the main steps to install EPAM Delivery Platform. Please check the Prerequisites Overview page before starting the installation. Also, to authenticate each of the release artifacts, please refer to the Verification of EDP Artifacts guide. There are two recommended ways to deploy EPAM Delivery Platform:

        Note

        The installation process below is given for a Kubernetes cluster. The steps that differ for an OpenShift cluster are indicated in the notes.

        Disclaimer

        KubeRocketCI is aligned with industry standards for storing and managing sensitive data, ensuring optimal security. However, the use of custom solutions introduces uncertainties, thus the responsibility for the safety of your data rests entirely with the platform administrator.

        1. KubeRocketCI manages secrets via External Secret Operator to integrate with a multitude of utilities. For insights into the secrets in use and their utilization, refer to the provided External Secrets Operator Integration.

        2. Create the edp namespace.

          kubectl create namespace edp
           

          Note

          For an OpenShift cluster, run the oc command instead of the kubectl one.

        3. (Optional) Deploy and configure Keycloak to enable Single Sign-On approach. To see the details on how to configure Keycloak correctly, please refer to the Install Keycloak page.

        4. Add the Helm charts repository:

          helm repo add epamedp https://epam.github.io/edp-helm-charts/stable
           
        5. Choose the required Helm chart version:

          helm search repo epamedp/edp-install
           NAME                    CHART VERSION   APP VERSION     DESCRIPTION
          @@ -147,4 +147,4 @@
                 enabled: false
           

          Note

          Set global.platform=openshift while deploying KubeRocketCI in OpenShift.

          Info

          The full installation with integration between tools will take at least 10 minutes.
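          The hunk above elides the actual deployment command. A minimal sketch, following the chart usage shown in the previous steps (the exact flags may differ per release):

          helm install edp epamedp/edp-install \
            --namespace edp \
            --version <edp_version> \
            --values values.yaml \
            --wait --timeout=900s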

        6. To check if the installation is successful, run the command below:

          helm status edp -n edp
           

          You can also check the ingress endpoints to access the KubeRocketCI user interface:

          kubectl describe ingress -n edp
        7. Once KubeRocketCI is successfully installed, you can navigate to our Use Cases to try out KubeRocketCI functionality.

diff --git a/operator-guide/install-external-secrets-operator/index.html b/operator-guide/install-external-secrets-operator/index.html


      Install External Secrets Operator⚓︎

      Inspect the prerequisites and the main steps to perform for enabling External Secrets Operator in EDP.

      Prerequisites⚓︎

      Installation⚓︎

      To install External Secrets Operator with Helm, run the following commands:

      helm repo add external-secrets https://charts.external-secrets.io
       
       helm install external-secrets \
          external-secrets/external-secrets \
           --version 0.9.9 \
           -n external-secrets \
           --create-namespace
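      To verify the operator is up, you can list the pods in its namespace (the expected workloads, per the upstream chart, include the controller, cert-controller, and webhook):

      kubectl get pods -n external-secrets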

      Info

      It is also possible to install External Secrets Operator using the Cluster Add-Ons or Operator Lifecycle Manager (OLM).


diff --git a/operator-guide/install-harbor/index.html b/operator-guide/install-harbor/index.html


        Install Harbor⚓︎

        EPAM Delivery Platform uses Harbor as storage for application images created during application builds.

        Inspect the prerequisites and the main steps to perform for enabling Harbor in EDP.

        Prerequisites⚓︎

        • Kubectl version 1.26.0 is installed.
        • Helm version 3.12.0+ is installed.

        Installation⚓︎

        To install Harbor with Helm, follow the steps below:

        1. Create a namespace for Harbor:

          kubectl create namespace harbor
           
        2. Create a secret for the administrator user and the registry:

          kubectl create secret generic harbor \
               --from-literal=HARBOR_ADMIN_PASSWORD=<secret> \
               --from-literal=REGISTRY_HTPASSWD=<secret> \
          @@ -102,4 +102,4 @@
               password: "changeit"
           
        3. To check if the installation is successful, run the command below:

          helm status <harbor-release> -n harbor
           
          You can also inspect the ingress endpoints to get the Harbor URL and open the Harbor UI:
          kubectl describe ingress <harbor_ingress> -n harbor
diff --git a/operator-guide/install-ingress-nginx/index.html b/operator-guide/install-ingress-nginx/index.html


        Install NGINX Ingress Controller⚓︎

        Inspect the prerequisites and the main steps to perform for installing the NGINX Ingress Controller on Kubernetes.

        Prerequisites⚓︎

        Installation⚓︎

        To install the ingress-nginx chart, follow the steps below:

        1. Create an ingress-nginx namespace:

          kubectl create namespace ingress-nginx
           
        2. Add a chart repository:

          helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
           helm repo update
           
        3. Install the ingress-nginx chart:

          helm install ingress ingress-nginx/ingress-nginx \
          @@ -47,4 +47,4 @@
           serviceAccount:
             create: true
             name: nginx-ingress-service-account

          Warning

          Align value controller.config.proxy-real-ip-cidr with AWS VPC CIDR.

          Note

          It is also possible to install the ingress controller via cluster add-ons. For details, please refer to the Install via Add-Ons page.
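        Once the chart is deployed, you can verify the controller and look up the external address of its service:

        kubectl get pods -n ingress-nginx
        kubectl get svc -n ingress-nginx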


diff --git a/operator-guide/install-keycloak/index.html b/operator-guide/install-keycloak/index.html


        Install Keycloak⚓︎

        Inspect the prerequisites and the main steps to perform for installing Keycloak.

        Info

        The installation process below is given for a Kubernetes cluster. The steps that differ for an OpenShift cluster are indicated in the warnings blocks.

        Prerequisites⚓︎

        Info

        The EDP team uses the keycloakx Helm chart from the codecentric repository, but other repositories can be used as well (e.g., Bitnami). Before installing Keycloak, it is necessary to install a PostgreSQL database.

        Info

        It is also possible to install Keycloak using the cluster add-ons. For details, please refer to the Install via Add-Ons page.

        PostgreSQL Installation⚓︎

        To install PostgreSQL, follow the steps below:

        1. Check that a security namespace is created. If not, run the following command to create it:

          kubectl create namespace security
           

          Warning

          On the OpenShift platform, apply the SecurityContextConstraints resource. Change the namespace in the users section if required.

          View: keycloak-scc.yaml
          allowHostDirVolumePlugin: false
           allowHostIPC: false
           allowHostNetwork: false
          @@ -241,4 +241,4 @@
             port: 5432
             username: admin
             database: keycloak
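          The hunk above elides the Keycloak deployment itself. A minimal sketch, assuming the codecentric repository mentioned in the prerequisites:

          helm repo add codecentric https://codecentric.github.io/helm-charts
          helm install keycloakx codecentric/keycloakx -n security -f values.yaml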

        Configuration⚓︎

        To prepare Keycloak for integration with EDP, follow the steps below:

        1. Ensure that the openshift realm is created.

        2. Create the edp_<EDP_PROJECT> user and set the password in the Master realm.

          Note

          This user should be used by EDP to access Keycloak. Please refer to the Install EDP page for details.

        3. In the Role Mapping tab, assign the proper roles to the user:

          • Realm Roles:

            • create-realm,
            • offline_access,
            • uma_authorization
          • Client Roles openshift-realm:

            • impersonation,
            • manage-authorization,
            • manage-clients,
            • manage-users

          Role mappings
          Role mappings


diff --git a/operator-guide/install-kiosk/index.html b/operator-guide/install-kiosk/index.html


      Set Up Kiosk⚓︎

      Kiosk is a multi-tenancy extension for managing tenants and namespaces in a shared Kubernetes cluster. Within EDP, Kiosk is used to separate resources and enables the following options (see more details):

      • Access to the EDP tenants in a Kubernetes cluster;
      • Multi-tenancy access at the service account level for application deploy.

      Inspect the main steps to set up Kiosk for the subsequent EDP installation.

      Note

      Kiosk deployment is mandatory for EDP v2.8. In earlier versions, Kiosk is not implemented. Since EDP v2.9.0, Kiosk integration is an optional feature: if you do not want to use it, skip these steps and disable it in the Helm parameters during EDP deployment.

      # global.kioskEnabled: <true/false>
       

      Prerequisites⚓︎

      Note

      This tool needs to be installed in advance before deploying EDP.

      Installation⚓︎

      • Deploy Kiosk version 0.2.11 in the cluster. To install it, run the following command:
          # Install kiosk with helm v3
         
           helm repo add kiosk https://charts.devspace.sh/
        @@ -29,4 +29,4 @@
           kind: ClusterRole
           name: kiosk-edit
           apiGroup: rbac.authorization.k8s.io
      • To provide access to the EDP tenant, generate a kubeconfig with the edp Service Account permissions. The edp account created earlier is located in the security namespace. A hedged sketch of obtaining a token for that account is shown below.
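      A minimal sketch, assuming kubectl v1.24+ (which supports kubectl create token); wiring the token into a kubeconfig is left to you:

      # Issue a short-lived token for the edp Service Account
      kubectl create token edp -n security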
diff --git a/operator-guide/install-loki/index.html b/operator-guide/install-loki/index.html


        Install Grafana Loki⚓︎

        EDP configures logging with the help of the Grafana Loki aggregation system.

        Installation⚓︎

        To install Loki, follow the steps below:

        1. Create logging namespace:

            kubectl create namespace logging
           

          Note

          On the OpenShift cluster, run the oc command instead of the kubectl command.

        2. Add a chart repository:

            helm repo add grafana https://grafana.github.io/helm-charts
             helm repo update
           

          Note

          It is possible to use Amazon Simple Storage Service (Amazon S3) as object storage for Loki. To configure access, please refer to the IRSA for Loki documentation.

        3. Install Loki v.2.6.0:

            helm install loki grafana/loki \
          @@ -41,4 +41,4 @@
              eks.amazonaws.com/role-arn: "arn:aws:iam::<AWS_ACCOUNT_ID>:role/AWSIRSA<CLUSTER_NAME><LOKI_NAMESPACE>Loki"
           persistence:
             enabled: false

          Note

          In case of using cluster scheduling and amazon-eks-pod-identity-webhook, it is necessary to restart the Loki pod after the cluster is up and running. Please refer to the Schedule Pods Restart documentation.

        4. Configure a custom bucket lifecycle policy to delete old data, for example as sketched below.
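        A minimal sketch using the AWS CLI, assuming a hypothetical bucket name and a 30-day retention (adjust both to your needs):

        # Expire Loki objects older than 30 days
        aws s3api put-bucket-lifecycle-configuration \
          --bucket <loki_bucket> \
          --lifecycle-configuration '{
            "Rules": [{
              "ID": "ExpireOldLokiChunks",
              "Status": "Enabled",
              "Filter": {"Prefix": ""},
              "Expiration": {"Days": 30}
            }]
          }'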


diff --git a/operator-guide/install-reportportal/index.html b/operator-guide/install-reportportal/index.html


        Install ReportPortal⚓︎

        Inspect the prerequisites and the main steps to perform for installing ReportPortal.

        Prerequisites⚓︎

        Info

        Please refer to the ReportPortal Helm Chart section for details.

        MinIO Installation⚓︎

        To install MinIO, follow the steps below:

        1. Check that edp namespace is created. If not, run the following command to create it:

          kubectl create namespace edp
           

          For the OpenShift users:

          When using the OpenShift platform, install the SecurityContextConstraints resources.
          In case of using a custom namespace for ReportPortal, change the namespace in the users section.

          View: report-portal-third-party-resources-scc.yaml
          apiVersion: security.openshift.io/v1
           kind: SecurityContextConstraints
           metadata:
          @@ -421,4 +421,4 @@
               component:  gateway
             sessionAffinity: None
             type: ClusterIP
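          The hunk above elides the MinIO deployment itself. A strongly hedged sketch using the Bitnami chart is shown below; the chart and values EDP actually uses may differ:

          helm repo add bitnami https://charts.bitnami.com/bitnami
          helm install minio bitnami/minio -n edp \
            --set auth.rootUser=<minio_user> \
            --set auth.rootPassword=<minio_password>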

        Note

        For user access: default/1q2w3e
        For admin access: superadmin/erebus
        Please refer to the ReportPortal.io page for details.

        Note

        It is also possible to install ReportPortal via cluster add-ons. For details, please refer to the Install via Add-Ons page.


diff --git a/operator-guide/install-tekton/index.html b/operator-guide/install-tekton/index.html


        Install Tekton⚓︎

        EPAM Delivery Platform uses Tekton resources, such as Tasks, Pipelines, Triggers, Interceptors and Chains for running the CI/CD pipelines.

        Inspect the main steps to perform for installing the Tekton resources via the Tekton release files.

        Prerequisites⚓︎

        • Kubectl version 1.24.0 or higher is installed. Please refer to the Kubernetes official website for details.
        • For Openshift/OKD, the latest version of the oc utility is required. Please refer to the OKD page on GitHub for details.
        • An AWS ECR repository is created for the Kaniko cache. By default, the Kaniko cache repository name is kaniko-cache; this parameter is located in our Tekton common-library.

        Installation on Kubernetes Cluster⚓︎

        To install Tekton resources, follow the steps below:

        Info

        Please refer to the Install Tekton Pipelines and Install and set up Tekton Triggers sections for details.

        1. Install Tekton pipelines v0.53.4 using the release file:

          Note

          Tekton Pipeline resources are used for managing and running EDP Tekton Pipelines and Tasks. Please refer to the EDP Tekton Pipelines and EDP Tekton Tasks pages for details.

          kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/previous/v0.53.4/release.yaml
           
        2. Install Tekton Triggers v0.25.3 using the release file:

          Note

          Tekton Trigger resources are used for managing and running EDP Tekton EventListeners, Triggers, TriggerBindings and TriggerTemplates. Please refer to the EDP Tekton Triggers page for details.

          kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/previous/v0.25.3/release.yaml
           
        3. Install Tekton Interceptors v0.25.3 using the release file:

          Note

          EPAM Delivery Platform uses GitLab, GitHub, and CEL ClusterInterceptors for managing requests from webhooks.

          kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/previous/v0.25.3/interceptors.yaml
           
        4. Install Tekton Chains v0.19.0 using the release file:

          kubectl apply -f https://storage.googleapis.com/tekton-releases/chains/previous/v0.19.0/release.yaml
          @@ -17,4 +17,4 @@
           oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-pipelines:pipelines-as-code-watcher
           oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-pipelines:pipelines-as-code-webhook
           oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-pipelines:default
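        To verify the resources, you can check that the Tekton controllers are running (the namespace names follow the upstream release defaults):

        kubectl get pods -n tekton-pipelines
        kubectl get pods -n tekton-chains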
diff --git a/operator-guide/install-velero/index.html b/operator-guide/install-velero/index.html


        Install Velero⚓︎

        Velero is an open source tool to safely back up, recover, and migrate Kubernetes clusters and persistent volumes. It works both on premises and in a public cloud. Velero consists of a server process running as a deployment in your Kubernetes cluster and a command-line interface (CLI) with which DevOps teams and platform operators configure scheduled backups, trigger ad-hoc backups, perform restores, and more.

        Installation⚓︎

        To install Velero, follow the steps below:

        1. Create velero namespace:

            kubectl create namespace velero
           

          Note

          On an OpenShift cluster, run the oc command instead of the kubectl one.

        2. Add a chart repository:

            helm repo add vmware-tanzu https://vmware-tanzu.github.io/helm-charts
             helm repo update
           

          Note

          Velero AWS Plugin requires access to AWS resources. To configure access, please refer to the IRSA for Velero documentation.

        3. Install Velero v.6.5.0:

            helm install velero vmware-tanzu/velero \
          @@ -39,4 +39,4 @@
           

          Note

          In case of using cluster scheduling and amazon-eks-pod-identity-webhook, it is necessary to restart the Velero pod after the cluster is up and running. Please refer to the Schedule Pods Restart documentation.

        4. Install the client side (velero cli) according to the official documentation.

        Configuration⚓︎

        1. Create backup for all components in the namespace:

            velero backup create <BACKUP_NAME> --include-namespaces <NAMESPACE>
           
        2. Create a daily backup of the namespace:

            velero schedule create <BACKUP_NAME>  --schedule "0 10 * * MON-FRI" --include-namespaces <NAMESPACE> --ttl 120h0m0s
           
        3. To restore from backup, use the following command:

            velero restore create <RESTORE_NAME> --from-backup <BACKUP_NAME>
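        To inspect backups and restores afterwards, the Velero CLI provides list and describe commands:

        velero backup get
        velero backup describe <BACKUP_NAME>
        velero restore get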
diff --git a/operator-guide/install-via-civo/index.html b/operator-guide/install-via-civo/index.html


        Install EDP via Civo⚓︎

        This documentation provides the detailed instructions on how to install the EPAM Delivery Platform via Civo Marketplace. As a prerequisite, make sure to sign up with Civo.

        Launch Cluster⚓︎

        The first step of the installation procedure is to launch the cluster. Please refer to the official instructions that describe this process in detail. To succeed, follow the steps below:

        1. Log in to the personal account.

        2. Create a new Kubernetes cluster with the parameters below. Please refer to the official guidelines for more details:

          • Name: demo
          • How many nodes: 1
          • Select size: Type: Standard, Size: Medium
          • Network: Default
          • Firewall: Create a new firewall with the 443 and 6443 ports opened
          • Advanced options: Kubernetes version: latest (currently 1.28.2)
          • Marketplace: Choose the Argo CD and Tekton stacks
        3. Wait till the cluster is created.

          Note

          The cluster deployment takes around two minutes. After cluster deployment, additional 5 minutes are required for both Argo CD and Tekton stacks to get deployed.

        4. As soon as the cluster is deployed, ensure all the marketplace applications are installed, too:

          Check applications
          Check applications

        5. Download and apply the kubeconfig file:

          Download kubeconfig
          Download kubeconfig

        6. After about 5 minutes, ensure all the pods are up and running in both the Tekton and Argo CD namespaces. Restart the deployments if any pods failed to deploy:

          kubectl get ns
           kubectl get pods -n tekton-pipelines
           kubectl get pods -n argocd
           

          Verify installation
          Verify installation

        Install EDP⚓︎

        As soon as the cluster is deployed, it is time to install the EDP application.

        1. In the Civo portal, navigate to Marketplace -> CI/CD:

          Civo Marketplace
          Civo Marketplace

        2. Select EDP and choose which Version Control Systems you would prefer to integrate it with and click the Install Apps button:

          Add EDP
          Add EDP

        3. Wait till the EDP app appears in the Installed applications list:

          EDP installed
          EDP installed

        4. Wait till all the pods are up and running. Use the kubectl get pods command to check the status of the pods:

          kubectl get pods -n edp

          EDP pods
          EDP pods

        5. As soon as all the pods are deployed, navigate to the Cluster Information tab and copy the DNS name:

          Getting DNS
          Getting DNS

        6. In the new browser tab, access the EDP Portal UI by typing the URL according to the https://edp-headlamp-edp.<DNS_name> format.

        7. Accept the security warning and click the service access token link to open the instructions on how to get a token to log in to the EDP Portal UI.

        8. As soon as the token is created, paste it in the ID token field and click the Authenticate button.

        9. Click the notification in the bottom left corner to open the Cluster Settings menu:

          Click notification
          Click notification

        10. In the Cluster Settings menu, enter edp in both default and allowed namespaces and click the Back button:

          Note

          Don't forget to click the + button to add the allowed namespace.

          Cluster Settings menu
          Cluster Settings menu

        Congratulations! You have just installed the EPAM Delivery Platform on the Civo cluster. Now you are ready to proceed with integrating EDP with all the necessary third-party tools. Navigate to the Integrate SonarQube page to proceed with onboarding EDP.


diff --git a/operator-guide/install-via-helmfile/index.html b/operator-guide/install-via-helmfile/index.html

      Install via Helmfile⚓︎

      This article provides the instructions on how to deploy EDP and its components in Kubernetes using Helmfile, a tool intended for deploying Helm charts. Helmfile templates are available in the GitHub repository.

      Important

      The Helmfile installation method for EPAM Delivery Platform (EDP) is currently not actively maintained. We strongly recommend exploring alternative installation options for the most up-to-date and well-supported deployment experience. You may consider using the Add-Ons approach or opting for installation via the AWS Marketplace to ensure a reliable and secure deployment of EDP.

      Prerequisites⚓︎

      The following tools and plugins must be installed:

      Helmfile Structure⚓︎

      • The envs/common.yaml file contains the specification for the environments pattern, the list of Helm repositories from which to fetch the Helm charts, and additional Helm parameters.
      • The envs/platform.yaml file contains global parameters that are used in various Helmfiles.
      • The releases/envs/ directory contains symlinks to the environment files.
      • The releases/*.yaml files contain the descriptions of parameters used when deploying a Helm chart.
      • The helmfile.yaml file defines the components to be installed by pointing to the Helm release files.
      • The envs/ci.yaml file contains stub parameters for the CI linter.
      • The test/lint-ci.sh script runs the CI linter with the debug log level and stub parameters.
      • The resources/*.yaml files contain additional resources for the OpenShift platform.

      Operate Helmfile⚓︎

      Before applying the Helmfile, please fill in the global parameters in the envs/platform.yaml (check the examples in the envs/ci.yaml) and releases/*.yaml files for every Helm deploy.

      Pay attention to the following recommendations while working with the Helmfile:

      • To run the linter, execute the test/lint-ci.sh script. A hedged example of applying a Helmfile is shown below.
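      A minimal sketch of applying the releases, assuming a hypothetical environment name platform defined in envs/:

      # Render and apply all releases for the chosen environment
      helmfile --environment platform apply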
diff --git a/operator-guide/installation-overview/index.html b/operator-guide/installation-overview/index.html


      Installation Overview⚓︎

      This documentation provides a concise introduction to the EPAM Delivery Platform installation options. EPAM Delivery Platform is a flexible tool that can be installed using different methods, each of which comes with its own benefits:

      • Quick Start Guide: This option is ideal for users looking to quickly deploy EDP with minimal prerequisites. Watch our video tutorial for a guided installation experience.
      • Install via Add-Ons: Recommended for advanced EDP users, this installation streamlines the setup of supported integrations and follows the GitOps approach. Familiarity with Argo CD is recommended.
      • Install via Helm Charts: While more complex, this method offers the highest level of flexibility, allowing for customization to suit specific needs. It requires a good understanding of EDP.
      • Install via AWS Marketplace: Optimal for users leveraging AWS infrastructure, offering a minimal setup for testing EDP functionality.
      • Install via Civo: Preferred for users deploying EDP in a Civo cluster. Follow our step-by-step video tutorial for onboarding EDP to a new Civo cluster.
diff --git a/operator-guide/jira-gerrit-integration/index.html b/operator-guide/jira-gerrit-integration/index.html


      Adjust VCS Integration With Jira⚓︎

      In order to adjust the Version Control System integration with Jira Server, first make sure you have the following prerequisites:

      • VCS Server
      • Jira
      • Crucible

      Once the prerequisites are checked, follow the steps below to proceed with the integration:

      1. Integrate every project in VCS Server with every project in Crucible by creating a corresponding request in EPAM Support Portal. Add the repository links and fill in the Keep Informed field, as this request must be approved.

        Request example
        Request example

      2. Provide additional details to the support team. If the VCS is Gerrit, inspect the sample below of its integration:

        2.1 Create a new "crucible-" user in Gerrit with an SSH key and add the new user to the "Non-Interactive Users" Gerrit group;

        2.2 Create a new group in Gerrit "crucible-watcher-group" and add the "crucible-" user;

        2.3 Provide access to All-Projects for the "crucible-watcher-group" group:

        Gerrit All-Projects configuration
        Gerrit All-Projects configuration

        Gerrit All-Projects configuration
        Gerrit All-Projects configuration

      3. To link commits with a Jira ticket in Gerrit, enter the Jira ticket ID in the commit message using the following format:

        [PROJECT-CODE-1234]: commit message

        where PROJECT-CODE is the specific code of a project and 1234 is the issue number, followed by the commit message.
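        For example, with a hypothetical project code EPMDEDP:

        git commit -m "[EPMDEDP-1234]: Add a healthcheck endpoint"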

      4. As a result, all Gerrit commits will be displayed on Crucible:

        Crucible project
        Crucible project

diff --git a/operator-guide/jira-integration/index.html b/operator-guide/jira-integration/index.html


        Adjust Jira Integration⚓︎

        This documentation guide provides step-by-step instructions for enabling the Jira integration option in the EDP Portal UI for EPAM Delivery Platform. Jira integration allows including useful metadata in Jira tickets.

        Overview⚓︎

        Integrating Jira can provide a number of benefits, such as increased visibility and traceability, automatic linking of code changes to relevant Jira issues, and streamlined management and tracking of development progress.

        By linking CI pipelines to Jira issues, teams can get a better understanding of the status of their work and how it relates to the overall development process. This can help to improve communication and collaboration, and ultimately lead to faster and more efficient delivery of software.

        Enabling Jira integration allows for the automatic population of three fields in Jira tickets: Fix Versions, Components, and Labels. Each of these fields provides distinct benefits:

        • Fix Versions: helps track progress against release schedules;
        • Components: allows grouping related issues together;
        • Labels: enables identification of specific types of work.

        Teams can utilize these fields to enhance their work prioritization, identify dependencies, improve collaboration, and ultimately achieve faster software delivery.

        Integration Procedure⚓︎

        In order to adjust the Jira server integration, add the JiraServer CR by performing the following:

        1. Provision the ci-jira secret using EDP Portal, Manifest or with the externalSecrets operator:

          Go to EDP Portal -> EDP -> Configuration -> Jira. Update or fill in the URL, User, Password fields and click the Save button:

          Jira update manual secret
          Jira update manual secret

          apiVersion: v1
          kind: Secret
          metadata:
            name: ci-jira
          ...
            apiUrl: 'https://jira-api.example.com'
            credentialName: ci-jira
            rootUrl: 'https://jira.example.com'

          Note

          The value of the credentialName property is the name of the Secret, which is indicated in the first point above.
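        For reference, below is a minimal sketch of the manifests whose middle parts are elided in this diff: the ci-jira Secret and the JiraServer CR that references it. The namespace, the Secret data keys, and the JiraServer API version are assumptions, not confirmed by this page:

          apiVersion: v1
          kind: Secret
          metadata:
            name: ci-jira
            namespace: edp               # assumed namespace
          type: Opaque
          stringData:                    # assumed keys matching the URL, User, Password fields above
            username: jira-user
            password: jira-password
          ---
          apiVersion: v2.edp.epam.com/v1 # assumed API version
          kind: JiraServer
          metadata:
            name: jira-server
          spec:
            apiUrl: 'https://jira-api.example.com'
            credentialName: ci-jira
            rootUrl: 'https://jira.example.com'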

        Enable Jira Using Helm Chart⚓︎

      Jira integration can also be set up when deploying EPAM Delivery Platform. To follow this approach, please familiarize yourself with the following parameters of the values.yaml file in the EDP installer. Enabling the jira.integration parameter creates the following custom resources:

        • EDPComponent;
        • JiraServer;
        • External Secrets Operator (in case it is used).

        To set up Jira integration along with EDP, follow the steps below:

      1. Create the ci-jira secret in the edp namespace as described above.

        2. Deploy the platform with the jira.integration parameter set to true in the values.yaml file.
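      A sketch of the corresponding values.yaml fragment, assuming the jira.integration parameter maps onto a nested key exactly as named:

        jira:
          integration: true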

        Jira Integration Usage⚓︎

        To use Jira integration, you need to set up your codebases accordingly.

        When creating a codebase, navigate to the Advanced Settings tab. Select the Integrate with Jira server check box and fill in the required fields:

      Advanced settings

        There are four predefined variables with the respective values that can be specified singly or as a combination. These variables show different data depending on which versioning type is currently used:

        If the EDP versioning type is used:

        • EDP_COMPONENT – returns application-name;
        • EDP_VERSION – returns 0.0.0-SNAPSHOT or 0.0.0-RC;
        • EDP_SEM_VERSION – returns 0.0.0;
        • EDP_GITTAG – returns build/0.0.0-SNAPSHOT.2 or build/0.0.0-RC.2.

        If the default versioning type is used:

        • EDP_COMPONENT – returns application-name;
        • EDP_VERSION – returns the date when the application was tagged. (Example: 20231023-131217);
        • EDP_SEM_VERSION – returns the date when the application was tagged. (Example: 20231023-131217);
        • EDP_GITTAG – returns the date when the application was tagged. (Example: 20231023-131217).

        Note

        There are no character restrictions when combining the variables. You can concatenate them using the dash sign. Combination samples:
        EDP_SEM_VERSION-EDP_COMPONENT;
        EDP_COMPONENT-hello-world/EDP_VERSION;
        etc.

        If the Jira integration is set up correctly, you will start seeing additional metadata in the tickets:

      Supplemental information

      If metadata is not visible in a Jira ticket, check the status field of the JiraIssueMetadata custom resources in the edp namespace. The codebase operator deletes this resource after successful processing, but in case of an error, the JiraIssueMetadata resource may still exist within the namespace.


      IAM Roles for Kaniko Service Accounts⚓︎

      Note

      The information below is relevant in case ECR is used as the Docker container registry. Make sure that IRSA is enabled and amazon-eks-pod-identity-webhook is deployed according to the Associate IAM Roles With Service Accounts documentation.

      The "build-image-kaniko" stage manages ECR through IRSA that should be available on the cluster. Follow the steps below to create a required role:

      1. Create AWS IAM Policy "AWSIRSA‹CLUSTER_NAME›‹EDP_NAMESPACE›Kaniko_policy":

          {
             "Version": "2012-10-17",
             "Statement": [
               {
          ...
               }
             ]
           }
        2. Attach the "AWSIRSA‹CLUSTER_NAME›‹EDP_NAMESPACE›Kaniko_policy" policy to the "AWSIRSA‹CLUSTER_NAME›‹EDP_NAMESPACE›Kaniko" role.

        3. Set the resulting role ARN value in the kaniko.roleArn parameter in values.yaml during the EDP installation.
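        A sketch of the corresponding values.yaml fragment; the AWS account ID is a placeholder:

          kaniko:
            roleArn: arn:aws:iam::<AWS_ACCOUNT_ID>:role/AWSIRSA<CLUSTER_NAME><EDP_NAMESPACE>Kaniko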


      Aggregate Application Logs Using EFK Stack⚓︎

      This documentation describes the advantages of the EFK stack over the traditional ELK stack, explains the value this stack brings to EDP, and describes how to set up the EFK stack to integrate the advanced logging system with your application.

      ELK Stack Overview⚓︎

      The ELK (Elasticsearch, Logstash and Kibana) stack gives the ability to aggregate logs from all the managed systems and applications, analyze these logs and create visualizations for application and infrastructure monitoring, faster troubleshooting, security analytics and more.

      Here is a brief description of the ELK stack default components:

      • Beats family - The log shipping tools that convey logs from the source locations, such as Filebeat, Metricbeat, Packetbeat, etc. Beats can work instead of Logstash or along with it.
      • Logstash - The log processing framework for log collecting, processing, storing and searching activities.
      • Elasticsearch - The distributed search and analytics engine based on the Lucene Java library.
      • Kibana - The visualization engine that queries the data from Elasticsearch.

      Default components of ELK Stack

      EFK Stack Overview⚓︎

      We use the FEK (also called EFK) stack (Fluent Bit, Elasticsearch, Kibana) in Kubernetes instead of ELK because this stack supports Logsight for Stage Verification and Incident Detection. In addition, Fluent Bit has a smaller memory footprint than Logstash. Fluent Bit provides Inputs, Parsers, Filters and Outputs plugins, similarly to Logstash.

      Default components of FEK Stack

      Automate Elasticsearch Index Rollover With ILM⚓︎

      In this guide, index rollover is automated with Index Lifecycle Management (ILM) in the FEK stack.

      The resources can be created via the API using curl, Postman, or the Kibana Dev Tools console, or via the GUI. Here they are created using Kibana Dev Tools.

      1. Go to Management → Dev Tools in the Kibana dashboard:

        Dev Tools

      2. Create index lifecycle policy with the index rollover:

        Note

        This policy can also be created in GUI in Management → Stack Management → Index Lifecycle Policies.

        Index Lifecycle has several phases: Hot, Warm, Cold, Frozen, Delete. Indices also have different priorities in each phase. The warmer the phase, the higher the priority is supposed to be, e.g., 100 for the hot phase, 50 for the warm phase, and 0 for the cold phase.

        In this use case, only the Hot and Delete phases are configured: an index will be created, rolled over to a new index when it reaches 1 GB in size or one day in age, and deleted in 7 days. The rollover may not happen exactly at 1 GB because it depends on how often the index size is checked. The check usually runs every 10 minutes, but this can be changed via the indices.lifecycle.poll_interval setting.

        The index lifecycle policy example:

        Index Lifecycle Policy
          PUT _ilm/policy/fluent-bit-policy
           {
             "policy": {
               "phases": {
          ...
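        Since the policy body is elided in this diff, here is a sketch of a complete policy matching the values described above (1 GB / 1 day rollover, 7-day deletion), using the standard ILM API fields:

          PUT _ilm/policy/fluent-bit-policy
          {
            "policy": {
              "phases": {
                "hot": {
                  "actions": {
                    "rollover": { "max_size": "1gb", "max_age": "1d" },
                    "set_priority": { "priority": 100 }
                  }
                },
                "delete": {
                  "min_age": "7d",
                  "actions": { "delete": {} }
                }
              }
            }
          }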
        3. Restart Fluent Bit pods.

        Fluent Bit will produce a new index every day with the new date in its name, as in fluent-bit-kube-2023.03.18. Index deletion will be performed according to the policy.

        Tips on Fluent Bit Debugging⚓︎

        If you experience a lot of difficulties when dealing with Fluent Bit, this section may help you.

        Fluent Bit has Docker images labelled -debug, e.g., cr.fluentbit.io/fluent/fluent-bit:2.0.9-debug.

        Change that image in the Kubernetes Fluent Bit DaemonSet and add the Trace_Error On parameter to the [OUTPUT] section in the Fluent Bit ConfigMap:

        [OUTPUT]
           Trace_Error On
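        For context, a sketch of how the parameter fits into a typical Elasticsearch output section; the Match pattern and host are placeholders:

          [OUTPUT]
              Name            es
              Match           kube.*
              Host            elasticsearch-master
              Port            9200
              Trace_Error     On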

        After adding the parameter above, you will start seeing more informative logs that should help you find the reason for the problem.


      Set Up Kubernetes⚓︎

      Make sure the cluster meets the following conditions:

      1. The Kubernetes cluster is installed with a minimum of 2 worker nodes with a total capacity of 8 cores and 32 GB RAM.

      2. A machine with kubectl is installed with cluster-admin access to the Kubernetes cluster.

      3. An Ingress controller is installed in the cluster, for example ingress-nginx.

      4. The Ingress controller is configured with the HTTP/2 protocol disabled and support for a 64k header size.

        Find below an example of the Config Map for the NGINX Ingress controller:

          kind: ConfigMap
           apiVersion: v1
           metadata:
             name: nginx-configuration
          ...
           reclaimPolicy: Retain
           volumeBindingMode: Immediate
           allowVolumeExpansion: true
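        The ConfigMap's data section is elided in this diff; a sketch of the keys relevant to the requirements above, assuming the standard ingress-nginx configuration options:

          kind: ConfigMap
          apiVersion: v1
          metadata:
            name: nginx-configuration
            namespace: ingress-nginx
          data:
            use-http2: "false"
            client-header-buffer-size: "64k"
            large-client-header-buffers: "4 64k"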

      Logsight Integration⚓︎

      Logsight can be integrated with the CI/CD pipeline. It connects to log data sources, analyses collected logs, and evaluates deployment risk scores.

      Overview⚓︎

      To understand whether a microservice or a component is ready for deployment, EDP suggests analyzing logs via Logsight to decide whether the deployment is risky.

      Please find more about Logsight in the official documentation.

      Logsight as a Quality Gate⚓︎

      Integration with Logsight allows enhancing and optimizing software releases by creating an additional quality gate.

      Logsight can be configured in two ways:

      • SaaS - an online system; this solution requires a connection string.
      • Self-deployment - a local installation.

      To work with Logsight, a new Deployment Risk stage must be added to the pipeline. At this stage, the logs are analyzed with the help of Logsight mechanisms.

      On the verification screen of Logsight, continuous verification of the application deployment can be monitored, and tests can be compared for detecting test flakiness.

      For example, two versions of a microservice can be compared in order to detect critical differences. A risk score is calculated for the state reached by version A and by version B. Afterwards, the deployment risk is calculated based on the individual risk scores.

      If the deployment failure risk is greater than a predefined threshold, the verification gate blocks the deployment from going to the target environment. In such a case, the Deployment Risk stage of the pipeline is not passed, and additional attention is required. The exact log messages can be displayed in the Logsight verification screen to help debug the problem.

      Use Logsight for EDP Development⚓︎

      Please find below the detailed description of Logsight integration with EDP.

      Deployment Approach⚓︎

      EDP uses Logsight in self-deployment mode.

      Logsight provides a deployment approach using Helm charts. Please find below the stack of components that must be deployed:

      • logsight - the core component.
      • logsight-backend - the backend that provides all necessary APIs and user management.
      • logsight-frontend - the frontend that provides the user interface.
      • logsight-result-api - responsible for obtaining results, for example, during the verification.

      Below is a diagram of interaction when integrating the components:

      Logsight Structure

      Configure FluentBit for Sending Log Data⚓︎

      Logsight is integrated with the EDP logging stack. The integration is built on top of the EFK (Elasticsearch-FluentBit-Kibana) stack. It is necessary to deploy the stack with security support, namely, with certificate support enabled.

      A FluentBit config indicates the namespace from which the logs will be received for further analysis. Below is an example of the FluentBit config for getting logs from the edp-delivery-edp-delivery-sit namespace:

      View: fluent-bit.conf
      [INPUT]
           Name              tail
           Tag               kube.sit.*
           Path              /var/log/containers/*edp-delivery-edp-delivery-sit*.log
      ...
           Format json
           json_date_format iso8601
           json_date_key timestamp
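      The middle of the config is elided in this diff; a sketch of an output section consistent with the tail above, assuming the logs are shipped via Fluent Bit's http output with placeholder host settings:

        [OUTPUT]
             Name              http
             Match             kube.sit.*
             Host              <logsight-backend-host>
             Port              8080
             Format            json
             json_date_format  iso8601
             json_date_key     timestamp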

      Deployment Risk Analysis⚓︎

      A deployment-risk stage is added to the EDP CD pipeline.

      Deployment Risk

      If the deployment risk is above 70%, the red state of the pipeline is expected.

      EDP consists of a set of containerized components. For the convenience of tracking the deployment risk trend for each component, this data is stored as Jenkins artifacts.

      If the deployment risk is higher than the 70% threshold, the EDP promotion of artifacts to the next environments does not pass. The deployment risk report can be analyzed in order to avoid potential problems with updating the components.

      To study the report in detail, use the link from the Jenkins pipeline to the Logsight verification screen:

      Logsight Insights

      In this example, logs from different versions of the gerrit-operator were analyzed. As can be seen from the report, a large number of new messages appeared in the logs, and the output frequency of other notifications has also changed, which led to the high deployment risk.

      The environment on which the analysis is performed can exist for different periods of time. Logsight only processes the minimum total number of logs since the creation of the environment.


      IAM Roles for Loki Service Accounts⚓︎

      Note

      Make sure that IRSA is enabled and amazon-eks-pod-identity-webhook is deployed according to the Associate IAM Roles With Service Accounts documentation.

      It is possible to use Amazon Simple Storage Service (Amazon S3) as object storage for Loki. In this case, Loki requires access to AWS resources. Follow the steps below to create the required role:

      1. Create AWS IAM Policy "AWSIRSA‹CLUSTER_NAME›‹LOKI_NAMESPACE›Loki_policy":

          {
               "Version": "2012-10-17",
               "Statement": [
                   {
          ...
              }
            ]
           }
        2. Attach the "AWSIRSA‹CLUSTER_NAME›‹LOKI_NAMESPACE›Loki_policy" policy to the "AWSIRSA‹CLUSTER_NAME›‹LOKI_NAMESPACE›Loki" role.

      3. Make sure that an Amazon S3 bucket named loki-‹CLUSTER_NAME› exists.

      4. Provide the key-value pair eks.amazonaws.com/role-arn: "arn:aws:iam:::role/AWSIRSA‹CLUSTER_NAME›‹LOKI_NAMESPACE›Loki" in the serviceAccount.annotations parameter in values.yaml during the Loki installation.
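      A sketch of the corresponding values.yaml fragment, assuming the standard serviceAccount block of the Loki chart:

        serviceAccount:
          create: true
          annotations:
            eks.amazonaws.com/role-arn: "arn:aws:iam:::role/AWSIRSA<CLUSTER_NAME><LOKI_NAMESPACE>Loki"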


      Manage Custom Certificates⚓︎

      Familiarize yourself with the detailed instructions on adding certificates to EDP resources as well as with the respective setup for Keycloak.

      EDP components that support custom certificates can be found in the table below:

      Helm Chart             | Sub Resources
      -----------------------|-----------------------------------------------
      admin-console-operator | admin-console
      gerrit-operator        | edp-gerrit
      jenkins-operator       | jenkins-operator, edp-jenkins, jenkins agents
      sonar-operator         | sonar-operator, edp-sonar
      keycloak-operator      | keycloak-operator
      nexus-operator         | oauth2-proxy
      edp-install            | oauth2-proxy
      edp-headlamp           | edp-headlamp

      Prerequisites⚓︎

      • The certificate in the *.crt format is used;
      • Kubectl version 1.23.0 is installed;
      • Helm version 3.10.2 is installed;
      • Java with the keytool command is available;
      • jq is installed.

      Enable the SPI Truststore of Keycloak⚓︎

      To import custom certificates to Keycloak, follow the steps below:

      1. Generate the cacerts local keystore and import the certificate there using the keytool tool:

          keytool -importcert -file CA.crt \
            -alias CA.crt -keystore ./cacerts \
            -storepass changeit -trustcacerts \
            -noprompt
          ...

        2. Reload the Jenkins pod:

          ns="edp"
           kubectl rollout restart -n "${ns}" deployment/jenkins
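        The steps between generating the keystore and reloading Jenkins are elided in this diff. As an illustration only, a hypothetical command for storing the generated keystore as a Kubernetes secret (the secret name and namespace are assumptions, not the guide's):

          kubectl -n security create secret generic spi-truststore \
            --from-file=./cacerts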

      Migrate CI Pipelines From Jenkins to Tekton⚓︎

      To migrate the CI pipelines for a codebase from Jenkins to Tekton, follow the steps below:

      Deploy a Custom EDP Scenario With Tekton and Jenkins CI Tools⚓︎

      Make sure that the Tekton stack is deployed according to the documentation. Enable Tekton as an EDP subcomponent:

      values.yaml
      edp-tekton:
         enabled: true
       

      Disable Jenkins Triggers⚓︎

      To disable Jenkins Triggers for the codebase, add the following code to the provisioner:

      job-provisioner
      def tektonCodebaseList = ["<codebase_name>"]
       if (!tektonCodebaseList.contains(codebaseName.toString())){
      ...
               maxTries = 1
               retryInterval = 5000
               threadPoolSize = 3

      Switch CI Tool for Codebase(s)⚓︎

      Go to the codebase Custom Resource and change the spec.ciTool field from jenkins to tekton.
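      A sketch of doing this with kubectl; the codebase name and namespace are placeholders, and the merge-patch approach is an assumption rather than a prescribed method:

        kubectl -n edp patch codebase <codebase_name> \
          --type merge -p '{"spec":{"ciTool":"tekton"}}'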


      Multitenant Logging⚓︎

      Get acquainted with the multitenant logging components and the project logs location in the Shared cluster.

      Logging Components⚓︎

      To configure the multitenant logging, it is necessary to deploy the following components:

      In Grafana, every tenant represents an organization, i.e. it is necessary to create an organization for every namespace in the cluster. To get more details regarding the architecture of the Logging Operator, please review Diagram 1.

      Logging operator scheme

      Note

      It is necessary to deploy Loki with the auth_enabled: true flag to ensure that the logs are separated for each tenant. For authentication, Loki requires the X-Scope-OrgID HTTP header.
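      For illustration, a sketch of a tenant-scoped query against Loki's HTTP API; the gateway URL and tenant ID are placeholders:

        curl -H "X-Scope-OrgID: <project_name>" \
          "http://loki-gateway/loki/api/v1/query_range?query={namespace=\"<project_name>\"}"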

      Review Project Logs in Grafana⚓︎

      To find the project logs, navigate to Grafana and follow the steps below:

      Note

      Grafana is a common service for different customers where each customer works in its own separate Grafana organization and doesn't have access to other projects.

      1. Choose the organization by clicking the Current Organization drop-down list. If a user is assigned to several organizations, switch easily by using the Switch button.

        Current organization

      2. Navigate to the left-side menu and click the Explore button to see the Log Browser:

        Grafana explore

      3. Click the Log Browser button to see the labels that can be used to filter logs (e.g., hostname, namespace, application name, pod, etc.):

        Note

        Enable the correct data source, select the relevant logging data in the top left-side corner, and note that the data source name always follows the ‹project_name›-logging pattern.

        Log browser

      4. Filter logs by clicking the Show logs button, or write a query and click the Run query button.

      5. Review the results, including the quantity of logs over time; see the example below:

        Logs example

        • Expand the logs to get detailed information about the object entry:

        Expand logs

        • Use the following buttons to include or remove the labels from the query:

        Addition button

        • See the ad-hoc statistics for a particular label:

        Ad-hoc stat example


      Manage Namespace⚓︎

      EDP provides the ability to deploy services to namespaces. By default, EDP creates these namespaces automatically. This chapter describes the alternative way of namespace creation and management.

      Overview⚓︎

      Namespaces are typically created by the platform when running CD Pipelines. The operator creates them according to the specific format: edp-<application-name>-<stage-name>. The cd-pipeline-operator should have permissions to automatically create namespaces when deploying applications and to delete them when uninstalling applications.

      Disable Automatic Namespace Creation⚓︎

      Occasionally, automatic creation of namespaces is not allowed. For example, due to the security requirements of the project, an EDP user may need to disable this setting. This option is controlled by the manageNamespace parameter, which is located in the values.yaml file. The manageNamespace parameter is set to true by default, but it can be changed to false. As a consequence, after changing the manageNamespace parameter, users will face the problem that they cannot deploy their application in the EDP Portal UI because of permission restrictions:

      Namespace creation error

      The error message shown above says that the user needs to create the namespace in the edp-<application-name>-<stage-name> format before creating stages. In addition, the cd-pipeline-operator must be granted administrator permissions to be able to manage this namespace. To create the namespace manually, follow the steps below:

      1. Create the namespace by running the command below:

           kubectl create namespace edp-<pipelineName>-<stageName>
           

          Note

          The edp-<pipelineName>-<stageName> format for namespaces is set by default but is not mandatory. You can set your custom namespace when creating an Environment.

        2. Create the administrator RoleBinding resource by applying the file below with the kubectl apply -f grant_admin_permissions.yaml command:

          View: grant_admin_permissions.yaml
           kind: RoleBinding
            apiVersion: rbac.authorization.k8s.io/v1
            metadata:
          ...
              apiGroup: rbac.authorization.k8s.io
              kind: ClusterRole
              name: admin
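          The middle of the manifest is elided in this diff; a sketch of a complete RoleBinding consistent with the visible fragment, where the binding name and the service account subject are assumptions:

            kind: RoleBinding
            apiVersion: rbac.authorization.k8s.io/v1
            metadata:
              name: cd-pipeline-operator-admin          # assumed name
              namespace: edp-<pipelineName>-<stageName>
            subjects:
              - kind: ServiceAccount
                name: edp-cd-pipeline-operator          # assumed service account
                namespace: edp
            roleRef:
              apiGroup: rbac.authorization.k8s.io
              kind: ClusterRole
              name: admin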
        3. Restart the cd-pipeline-operator pod, in order not to wait for the operator reconciliation.

        CD Pipeline Operator RBAC Model⚓︎

        The manageNamespace parameter also defines the resources that will be created depending on whether the deployed cluster is OpenShift or Kubernetes. This scheme displays the nesting of operator input parameters:

        CD Pipeline Operator Input Parameter Scheme

        Note

        When deploying application on the OpenShift cluster, the registry-view RoleBinding is created in the main namespace.


      Nexus Sonatype Integration⚓︎

      This documentation guide provides comprehensive instructions for integrating Nexus with the EPAM Delivery Platform.

      Info

      In EDP release 3.5, we changed the deployment strategy for the nexus-operator component; it is no longer installed by default. The nexusURL parameter management has been transferred from the values.yaml file to Kubernetes secrets.

      Prerequisites⚓︎

      Before proceeding, ensure that you have the following prerequisites:

      • Kubectl version 1.26.0 is installed.
      • Helm version 3.12.0+ is installed.

      Installation⚓︎

      To install Nexus with pre-defined templates, use the nexus-operator installed via Cluster Add-Ons approach.

      Configuration⚓︎

      To ensure strong authentication and accurate access control, creating a Nexus Sonatype service account with the name ci.user is crucial. This user serves as a unique identifier, facilitating connection with the EDP ecosystem.

      To create the Nexus ci.user and define repository parameters, follow the steps below:

      1. Open the Nexus UI and navigate to Server administration and configuration -> Security -> User. Click the Create local user button to create a new user:

        Nexus user settings

      2. Type the ci-user username, define an expiration period, and click the Generate button to create the token:

        Nexus create user

      3. EDP relies on a predetermined repository naming convention: all repository names are predefined. Navigate to Server administration and configuration -> Repository -> Repositories in Nexus. You only need to create the repositories for the required language.

        Nexus repository list

        a) Click Create a repository by selecting "maven2(proxy)" and set the name as "edp-maven-proxy". Enter the remote storage URL as "https://repo1.maven.org/maven2/". Save the configuration.

        b) Click Create a repository by selecting "maven2(hosted)" and set the name as "edp-maven-snapshot". Change the Version policy to "snapshot". Save the configuration.

        c) Click Create a repository by selecting "maven2(hosted)" and set the name as "edp-maven-releases". Change the Version policy to "release". Save the configuration.

        d) Click Create a repository by selecting "maven2(group)" and set the name as "edp-maven-group". Change the Version policy to "release". Add repository to group. Save the configuration.

        a) Click Create a repository by selecting "npm(proxy)" and set the name as "edp-npm-proxy". Enter the remote storage URL as "https://registry.npmjs.org". Save the configuration.

        b) Click Create a repository by selecting "npm(hosted)" and set the name as "edp-npm-snapshot". Save the configuration.

        c) Click Create a repository by selecting "npm(hosted)" and set the name as "edp-npm-releases". Save the configuration.

        d) Click Create a repository by selecting "npm(hosted)" and set the name as "edp-npm-hosted". Save the configuration.

        e) Click Create a repository by selecting "npm(group)" and set the name as "edp-npm-group". Add repository to group. Save the configuration.

        a) Click Create a repository by selecting "nuget(proxy)" and set the name as "edp-dotnet-proxy". Select Protocol version NuGet V3. Enter the remote storage URL as "https://api.nuget.org/v3/index.json". Save the configuration.

        b) Click Create a repository by selecting "nuget(hosted)" and set the name as "edp-dotnet-snapshot". Save the configuration.

        c) Click Create a repository by selecting "nuget(hosted)" and set the name as "edp-dotnet-releases". Save the configuration.

        d) Click Create a repository by selecting "nuget(hosted)" and set the name as "edp-dotnet-hosted". Save the configuration.

        e) Click Create a repository by selecting "nuget(group)" and set the name as "edp-dotnet-group". Add repository to group. Save the configuration.

        a) Click Create a repository by selecting "pypi(proxy)" and set the name as "edp-python-proxy". Enter the remote storage URL as "https://pypi.org". Save the configuration.

        b) Click Create a repository by selecting "pypi(hosted)" and set the name as "edp-python-snapshot". Save the configuration.

        c) Click Create a repository by selecting "pypi(hosted)" and set the name as "edp-python-releases". Save the configuration.

        d) Click Create a repository by selecting "pypi(group)" and set the name as "edp-python-group". Add repository to group. Save the configuration.

      4. Provision secrets using a manifest, the EDP Portal, or the External Secrets Operator:

      Go to EDP Portal -> EDP -> Configuration -> Nexus. Update or fill in the URL, nexus-user-id, nexus-user-password and click the Save button:

      Nexus update manual secret

      apiVersion: v1
      + Nexus Sonatype - EPAM Delivery Platform      

      Nexus Sonatype Integration⚓︎

      This documentation guide provides comprehensive instructions for integrating Nexus with the EPAM Delivery Platform.

      Info

      In EDP release 3.5, we have changed the deployment strategy for the nexus-operator component, now it is not installed by default. The nexusURL parameter management has been transferred from the values.yaml file to Kubernetes secrets.

      Prerequisites⚓︎

      Before proceeding, ensure that you have the following prerequisites:

      • Kubectl version 1.26.0 is installed.
      • Helm version 3.12.0+ is installed.

      Installation⚓︎

      To install Nexus with pre-defined templates, use the nexus-operator installed via Cluster Add-Ons approach.

      Configuration⚓︎

      To ensure strong authentication and accurate access control, creating a Nexus Sonatype service account with the name ci.user is crucial. This user serves as a unique identifier, facilitating connection with the EDP ecosystem.

      To create the Nexus ci.userand define repository parameters follow the steps below:

      1. Open the Nexus UI and navigate to Server administration and configuration -> Security -> User. Click the Create local user button to create a new user:

        Nexus user settings
        Nexus user settings

      2. Type the ci-user username, define an expiration period, and click the Generate button to create the token:

        Nexus create user
        Nexus create user

      3. EDP relies on a predetermined repository naming convention all repository names are predefined. Navigate to Server administration and configuration -> Repository -> Repositories in Nexus. You can only create a repository with the required language.

        Nexus repository list
        Nexus repository list

        a) Click Create a repository by selecting "maven2(proxy)" and set the name as "edp-maven-proxy". Enter the remote storage URL as "https://repo1.maven.org/maven2/". Save the configuration.

        b) Click Create a repository by selecting "maven2(hosted)" and set the name as "edp-maven-snapshot". Change the Version policy to "snapshot". Save the configuration.

        c) Click Create a repository by selecting "maven2(hosted)" and set the name as "edp-maven-releases". Change the Version policy to "release". Save the configuration.

        d) Click Create a repository by selecting "maven2(group)" and set the name as "edp-maven-group". Change the Version policy to "release". Add repository to group. Save the configuration.

        a) Click Create a repository by selecting "npm(proxy)" and set the name as "edp-npm-proxy". Enter the remote storage URL as "https://registry.npmjs.org". Save the configuration.

        b) Click Create a repository by selecting "npm(hosted)" and set the name as "edp-npm-snapshot". Save the configuration.

        c) Click Create a repository by selecting "npm(hosted)" and set the name as "edp-npm-releases". Save the configuration.

        d) Click Create a repository by selecting "npm(hosted)" and set the name as "edp-npm-hosted". Save the configuration.

        e) Click Create a repository by selecting "npm(group)" and set the name as "edp-npm-group". Add repository to group. Save the configuration.

        a) Click Create a repository by selecting "nuget(proxy)" and set the name as "edp-dotnet-proxy". Select Protocol version NuGet V3. Enter the remote storage URL as "https://api.nuget.org/v3/index.json". Save the configuration.

        b) Click Create a repository by selecting "nuget(hosted)" and set the name as "edp-dotnet-snapshot". Save the configuration.

        c) Click Create a repository by selecting "nuget(hosted)" and set the name as "edp-dotnet-releases". Save the configuration.

        d) Click Create a repository by selecting "nuget(hosted)" and set the name as "edp-dotnet-hosted". Save the configuration.

        e) Click Create a repository by selecting "nuget(group)" and set the name as "edp-dotnet-group". Add repository to group. Save the configuration.

  PyPI:

  a) Click Create a repository by selecting "pypi(proxy)" and set the name as "edp-python-proxy". Enter the remote storage URL as "https://pypi.org". Save the configuration.

        b) Click Create a repository by selecting "pypi(hosted)" and set the name as "edp-python-snapshot". Save the configuration.

        c) Click Create a repository by selecting "pypi(hosted)" and set the name as "edp-python-releases". Save the configuration.

        d) Click Create a repository by selecting "pypi(group)" and set the name as "edp-python-group". Add repository to group. Save the configuration.

4. Provision secrets using a manifest, the EDP Portal, or the External Secrets Operator:

Go to EDP Portal -> EDP -> Configuration -> Nexus. Update or fill in the URL, nexus-user-id, and nexus-user-password fields and click the Save button:

Nexus update manual secret

Alternatively, create the ci-nexus secret with a manifest. A minimal sketch (the field names follow the EDP convention; adjust the values to your setup):

apiVersion: v1
kind: Secret
metadata:
  name: ci-nexus
  namespace: edp
type: Opaque
stringData:              # keys assumed: username, password, url
  username: <nexus-user-id>
  password: <nexus-user-password>
  url: <nexus-url>

When using the External Secrets Operator, store the credentials in your secret store as a JSON document, for example:

"ci-nexus":
{
  "username": "XXXXXXX",
  "password": "XXXXXXX",
  "url": "XXXXXXX"
}

Go to EDP Portal -> EDP -> Configuration -> Nexus and see the Managed by External Secret message.

Nexus managed by external secret operator

More details on the External Secrets Operator integration can be found on the following page.
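For reference, a minimal ExternalSecret sketch for the ci-nexus secret (the secret store name and remote key below are assumptions; align them with your secret store setup):

apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
  name: ci-nexus
  namespace: edp
spec:
  secretStoreRef:
    name: secret-store      # assumed SecretStore name
    kind: SecretStore
  target:
    name: ci-nexus          # Kubernetes secret to create
  data:
    - secretKey: username
      remoteRef:
        key: ci-nexus       # assumed remote key holding the JSON document above
        property: username
    - secretKey: password
      remoteRef:
        key: ci-nexus
        property: password
    - secretKey: url
      remoteRef:
        key: ci-nexus
        property: url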


      Microsoft Teams Notification⚓︎

This section describes how to add status notifications to Tekton pipelines by sending the pipeline status to a Microsoft Teams channel.

      Create Incoming WebHook⚓︎

To create an Incoming Webhook for the Microsoft Teams channel, follow the steps below:

1. Open the channel that will receive notifications and click the ••• button in the upper-right corner. Select Connectors in the dropdown menu:

Microsoft Teams menu

      2. In the search field, type Incoming Webhook and click Configure:

Connectors

      3. Provide a name and upload an image for the webhook if necessary. Click Create:

Connectors setup

      4. Copy and save the unique WebHookURL presented in the dialog. Click Done:

WebHookURL

5. Create a secret with the webhook URL within the edp namespace:

        kubectl -n edp create secret generic microsoft-teams-webhook-url-secret \
         --from-literal=url=<webhookURL>
       

6. Add the notification task to the pipeline by adding the code below to the finally block of the pipeline, then save:

      {{ include "send-to-microsoft-teams-build" . | nindent 4 }}
       

      Customize Notification Message⚓︎

To make the notification message informative, add relevant text to the message. Here are the steps to implement it:

      1. Create a new pipeline with a unique name or modify your custom pipeline created before.

      2. Add the task below in the finally block with a unique name. Edit the params.message value if necessary:

View: Task send-to-microsoft-teams

  - name: microsoft-teams-pipeline-status-notification-failed
    taskRef:
      kind: Task
      name: send-to-microsoft-teams
    params:
      # parameters are illustrative; edit params.message as needed
      - name: message
        value: "Pipeline $(context.pipelineRun.name) failed."
    when:
      - input: $(tasks.status)
        operator: in
        values:
          - Failed
          - PipelineRunTimeout

After customization, the following message is supposed to appear in the channel when a pipeline fails:

Notification example


        Protect Endpoints⚓︎

OAuth2-Proxy is a versatile tool that serves as a reverse proxy, utilizing the OAuth 2.0 protocol with various providers like Google, GitHub, and Keycloak to provide both authentication and authorization. This guide instructs readers on how to protect their applications' endpoints using OAuth2-Proxy. By following these steps, users can strengthen their endpoints' security without modifying their current application code. In the context of EDP, OAuth2-Proxy integrates with the Keycloak OIDC provider, enabling it to protect any component that lacks built-in authentication.

        Note

        OAuth2-Proxy is disabled by default when installing EDP.

        Prerequisites⚓︎

        Enable OAuth2-Proxy⚓︎

        Enabling OAuth2-Proxy implies the following general steps:

1. Update your EDP deployment using the --set 'sso.enabled=true' command-line flag, or enable the sso parameter in the --values file.
        2. Check that OAuth2-Proxy is deployed successfully.
        3. Enable authentication for your Ingress by adding auth-signin and auth-url of OAuth2-Proxy to its annotation.

        This will deploy and connect OAuth2-Proxy to your application endpoint.
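For illustration, the resulting auth annotations on a protected Ingress look roughly like this (host values are placeholders; the service address matches the default OAuth2-Proxy deployment in the edp namespace):

metadata:
  annotations:
    nginx.ingress.kubernetes.io/auth-signin: 'https://<oauth-ingress-host>/oauth2/start?rd=https://$host$request_uri'
    nginx.ingress.kubernetes.io/auth-url: 'http://oauth2-proxy.edp.svc.cluster.local:8080/oauth2/auth'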

        Enable OAuth2-Proxy on Tekton Dashboard⚓︎

        The example below illustrates how to use OAuth2-Proxy in practice when using the Tekton dashboard:

        1. Run helm upgrade to update edp-install release:
          helm upgrade --version <version> --set 'sso.enabled=true' edp-install --namespace edp
           
        2. Check that OAuth2-Proxy is deployed successfully.
3. Edit the Tekton dashboard Ingress by adding the auth-signin and auth-url annotations of oauth2-proxy with the following kubectl command:
          kubectl annotate ingress <application-ingress-name> nginx.ingress.kubernetes.io/auth-signin='https://<oauth-ingress-host>/oauth2/start?rd=https://$host$request_uri' nginx.ingress.kubernetes.io/auth-url='http://oauth2-proxy.edp.svc.cluster.local:8080/oauth2/auth'
           
On OpenShift, enable the Tekton dashboard proxy instead:

1. Generate a cookie-secret for the proxy with the following command:
          tekton_dashboard_cookie_secret=$(openssl rand -base64 32 | head -c 32)
           
        2. Create tekton-dashboard-proxy-cookie-secret in the edp namespace:
          kubectl -n edp create secret generic tekton-dashboard-proxy-cookie-secret \
               --from-literal=cookie-secret=${tekton_dashboard_cookie_secret}
           
        3. Run helm upgrade to update edp-install release:
          helm upgrade --version <version> --set 'edp-tekton.dashboard.openshift_proxy.enabled=true' edp-install --namespace edp

        Set Up OpenShift⚓︎

        Make sure the cluster meets the following conditions:

1. The OpenShift cluster is installed with a minimum of 2 worker nodes with a total capacity of 8 cores and 32 GB RAM.

2. The load balancer (if any exists in front of the OpenShift router or ingress controller) is configured with session stickiness, the HTTP/2 protocol disabled, and support for a 64k header size.

          Find below an example of the Config Map for the NGINX Ingress Controller:

  kind: ConfigMap
  apiVersion: v1
  metadata:
    name: nginx-configuration
  data:
    # illustrative settings matching the requirements above; adjust to your controller
    use-http2: "false"
    client-header-buffer-size: "64k"
    large-client-header-buffers: "4 64k"

  The cluster should also provide a default StorageClass; a typical definition (the provisioner is environment-specific) looks like this:

  apiVersion: storage.k8s.io/v1
  kind: StorageClass
  metadata:
    name: gp3
  provisioner: ebs.csi.aws.com   # assumed AWS EBS CSI provisioner; replace for other platforms
  reclaimPolicy: Retain
  volumeBindingMode: WaitForFirstConsumer
  allowVolumeExpansion: true

      Secure Delivery on the Platform⚓︎

The EPAM Delivery Platform emphasizes the importance of incorporating security practices into the software development lifecycle through the DevSecOps approach. By integrating a diverse range of open-source and enterprise security tools tailored to specific functionalities, organizations can ensure efficient and secure software development. These tools, combined with fundamental DevSecOps principles such as collaboration, continuous security, and automation, contribute to the identification and remediation of vulnerabilities early in the process, minimize risks, and foster a security-first culture across the organization.

The EPAM Delivery Platform enables seamless integration with various security tools and vulnerability management systems, enhancing the security of source code and ensuring compliance.

      Supported Solutions⚓︎

The table below categorizes various open-source and enterprise security tools based on their specific functionalities, providing a comprehensive view of the available options for each security aspect. This classification facilitates informed decision-making when selecting and integrating security tools into a development pipeline, ensuring an efficient and robust security stance. EDP supports the integration of both open-source and enterprise security tools, providing a flexible and versatile solution for security automation.

Functionality | Open-Source Tools (integrated in Pipelines) | Enterprise Tools (available for Integration)
Hardcoded Credentials Scanner | TruffleHog, GitLeaks, Git-secrets | GitGuardian, SpectralOps, Bridgecrew
Static Application Security Testing | SonarQube, Semgrep CLI | Veracode, Checkmarx, Coverity
Software Composition Analysis | OWASP Dependency-Check, cdxgen | Black Duck Hub, Mend, Snyk
Container Security | Trivy, Grype, Clair | Aqua Security, Sysdig Secure, Snyk
Infrastructure as Code Security | Checkov, Tfsec | Bridgecrew, Prisma Cloud, Snyk
Dynamic Application Security Testing | OWASP Zed Attack Proxy | Fortify WebInspect, Rapid7 InsightAppSec, Checkmarx
Continuous Monitoring and Logging | ELK Stack, OpenSearch, Loki | Splunk, Datadog
Security Audits and Assessments | OpenVAS | Tenable Nessus, QualysGuard, BurpSuite Professional
Vulnerability Management and Reporting | DefectDojo, OWASP Dependency-Track | Metasploit

      For better visualization, see the scheme below:

Security tools in EDP

      Integrated Tools⚓︎

To obtain and manage reports after scanning, various vulnerability management systems and security tools have to be deployed. These include:

      DefectDojo⚓︎

      DefectDojo is a comprehensive vulnerability management and security orchestration platform facilitating the handling of uploaded security reports. Examine the prerequisites and fundamental instructions for installing DefectDojo on Kubernetes or OpenShift platforms.

      OWASP Dependency Track⚓︎

      Dependency Track is an intelligent Software Composition Analysis (SCA) platform that provides a comprehensive solution for managing vulnerabilities in third-party and open-source components.

      Gitleaks⚓︎

      Gitleaks is a versatile SAST tool used to scan Git repositories for hardcoded secrets, such as passwords and API keys, to prevent potential data leaks and unauthorized access.
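As a quick illustration, a repository can be scanned locally with a single command (flags per Gitleaks v8; adjust to your version):

gitleaks detect --source . --report-path gitleaks-report.json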

      Trivy⚓︎

      Trivy is a simple and comprehensive vulnerability scanner for containers and other artifacts, providing insight into potential security issues across multiple ecosystems.
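For example, scanning a container image for known CVEs is a one-liner (image reference is a placeholder):

trivy image <your-image>:<tag>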

      Grype⚓︎

      Grype is a fast and reliable vulnerability scanner for container images and filesystems, maintaining an up-to-date vulnerability database for efficient and accurate scanning.

      Tfsec⚓︎

      Tfsec is an effective Infrastructure as Code (IaC) security scanner, tailored specifically for reviewing Terraform templates. It helps identify potential security issues related to misconfigurations and non-compliant practices, enabling developers to address vulnerabilities and ensure secure infrastructure deployment.

      Checkov⚓︎

      Checkov is a robust static code analysis tool designed for IaC security, supporting various IaC frameworks such as Terraform, CloudFormation, and Kubernetes. It assists in detecting and mitigating security and compliance misconfigurations, promoting best practices and adherence to industry standards across the infrastructure.
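A typical local run points Checkov at a directory of IaC templates, for example:

checkov -d ./infrastructure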

      Cdxgen⚓︎

      Cdxgen is a lightweight and efficient tool for generating Software Bill of Materials (SBOM) using CycloneDX, a standard format for managing component inventory. It helps organizations maintain an up-to-date record of all software components, their versions, and related vulnerabilities, streamlining monitoring and compliance within the software supply chain.

      Semgrep CLI⚓︎

      Semgrep CLI is a versatile and user-friendly command-line interface for the Semgrep security scanner, enabling developers to perform Static Application Security Testing (SAST) for various programming languages. It focuses on detecting and preventing potential security vulnerabilities, code quality issues, and custom anti-patterns, ensuring secure and efficient code development.

      Clair⚓︎

      Clair is an open-source container security tool that is designed to help you assess the security of container images and identify vulnerabilities within them. It is particularly useful for organizations using container orchestration platforms such as Kubernetes.

      OpenVAS⚓︎

      OpenVAS is an open-source network vulnerability scanner and security management tool. It is designed to identify and assess security vulnerabilities in computer systems, networks, and applications. OpenVAS provides a comprehensive set of features for vulnerability scanning, assessment, and management.

      TruffleHog⚓︎

      TruffleHog is an open-source Python tool designed for finding and identifying potentially sensitive and secret information in the source code and commit history of Git repositories. It's particularly useful for locating unintentional disclosures of confidential data, such as API keys, passwords, and other credentials, that might have been inadvertently committed to a code repository.

      Git-secrets⚓︎

      Git-secrets is an open-source tool that helps prevent the accidental committing of secrets, sensitive information, and other types of confidential data into Git repositories. It is designed to enforce security best practices and reduce the risk of unintentional data exposure by scanning Git repositories for predefined secret patterns.
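For instance, the standard workflow installs the hooks and scans the working tree (AWS patterns shown as an example):

git secrets --install          # add Git hooks to the current repository
git secrets --register-aws     # register the built-in AWS credential patterns
git secrets --scan             # scan the repository for registered patterns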

      ELK Stack⚓︎

The EFK stack (Fluent Bit, Elasticsearch, Kibana) is used in Kubernetes instead of ELK because it provides support for Logsight for Stage Verification and Incident Detection. In addition, Fluent Bit has a smaller memory footprint than Logstash. Fluent Bit offers Inputs, Parsers, Filters, and Outputs plugins, similarly to Logstash.

      Loki⚓︎

Loki is a log aggregation and processing system designed for cloud-native environments. It is often used alongside CNCF (Cloud Native Computing Foundation) projects like Prometheus for monitoring and observability.

      OpenSearch⚓︎

      OpenSearch is the flexible, scalable, open-source way to build solutions for data-intensive applications. Explore, enrich, and visualize your data with built-in performance, developer-friendly tools, and powerful integrations for machine learning, data processing, and more.

      OWASP Dependency-Check⚓︎

      OWASP Dependency-Check is a software composition analysis tool that helps identify and report known security vulnerabilities in project dependencies. It is particularly valuable for developers and organizations looking to secure their applications by identifying and addressing vulnerabilities in the libraries and components they use.

      OWASP Zed Attack Proxy (ZAP)⚓︎

OWASP Zed Attack Proxy (ZAP) is a security testing tool for finding vulnerabilities in web applications during the development and testing phases. ZAP is designed to help developers and security professionals identify and address security issues and potential vulnerabilities in web applications. It provides automated and manual testing capabilities, as well as a wide range of features for security testing.


      Overview⚓︎

      The EDP leverages the multi-tenancy approach to deliver a business-centric solution for managing containerized applications. The essence of this approach lies in using the additional tools for efficient resource allocation in a Kubernetes cluster. This approach grants tenants a considerable level of autonomy without risking the overall system's security and stability. It allows different users or teams to share the resources of a single Kubernetes cluster while keeping their workloads isolated. The resulting benefits include cost reduction, increased efficiency, improved scalability, and enhanced system resilience.

      The EDP project inherently can work with Capsule and Kiosk, two tools that enable managing multi-tenancy. While Kiosk empowers the creation of isolated environments within a Kubernetes cluster, Capsule extends these capabilities further by imposing restrictions on resource usage. Through effective use of these tools, the EDP project offers a robust multi-tenancy model that ensures optimal resource utilization and secure separation of responsibilities.


      Static Application Security Testing Overview⚓︎

EPAM Delivery Platform provides built-in Static Application Security Testing support, allowing you to work with the Semgrep security scanner and the DefectDojo vulnerability management system to check source code for known vulnerabilities.

      Supported Languages⚓︎

      EDP SAST supports a number of languages and package managers.

Language (Package Managers) | Scan Tool | Build Tool
Java | Semgrep | Maven, Gradle
Go | Semgrep | Go
React | Semgrep | Npm
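Such a scan can be reproduced locally with the Semgrep CLI, for example:

semgrep --config auto .

Here, --config auto lets Semgrep pick rulesets for the languages it detects in the scanned directory.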

      Supported Vulnerability Management System⚓︎

      To get and then manage a SAST report after scanning, it is necessary to deploy the vulnerability management system, for instance, DefectDojo.

      DefectDojo⚓︎

      DefectDojo is a vulnerability management and security orchestration platform that allows managing the uploaded security reports.

      Inspect the prerequisites and the main steps for installing DefectDojo on Kubernetes or OpenShift platforms.


      Package Registry⚓︎

This page describes the supported package registry providers and provides detailed instructions on how to adjust configurations to work properly with these providers.

      Supported Package Registry Providers⚓︎

      Currently, KubeRocketCI Tekton pipelines support the following package registries:

      • Nexus;
      • GitLab;
      • GitHub;
      • Azure DevOps.

      The table below displays the supported registries and the languages they correspond to:

Language | Framework | Build Tool | Proxy Registry | Snapshots/Releases Registry
Java | Java 8, Java 11, Java 17 | Maven | Nexus, Gitlab, GitHub, Azure DevOps | Nexus, Gitlab, GitHub, Azure DevOps
Python | Python 3.8, FastAPI, Flask | Python | Nexus, Gitlab, Azure DevOps | Nexus, Gitlab, Azure DevOps
C# | .Net 3.1, .Net 6.0 | .Net | No proxy is used for this language. | Nexus, Gitlab, GitHub, Azure DevOps
JavaScript | React, Vue, Angular, Express, Next.js, Antora | NPM | Nexus, Gitlab, GitHub, Azure DevOps | Nexus, Gitlab, GitHub, Azure DevOps

      Proxy Package Registry Configuration⚓︎

      By default, KubeRocketCI uses Nexus as the proxy registry for storing and caching application dependencies. This setting is fixed and cannot be modified.

      Snapshot/Release Package Registry Configuration⚓︎

The edp-tekton Helm Chart allows overriding the default settings for package registries through the tekton.configs section of its values.yaml file.

      To provide necessary credentials for accessing the package registries, the user should create the package-registries-auth-secret secret and set the tekton.packageRegistriesSecret.enabled value to true to mount the secret into the pipeline.
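For instance, such a secret can be created with kubectl (the key names below are illustrative; use the keys your pipelines reference):

kubectl -n edp create secret generic package-registries-auth-secret \
  --from-literal=username=<registry-username> \
  --from-literal=password=<registry-token>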

      To replace the default name of the secret, the user should set the tekton.packageRegistriesSecret.name parameter to the desired value:

tekton:
  packageRegistriesSecret:
    enabled: true
    name: "package-registries-auth-secret"
  # the tekton.configs section (registry-specific templates, e.g. a NuGet configuration) is elided in this excerpt

      EDP Installation Prerequisites⚓︎

Before installing EDP via Helm Chart, make sure to complete the following steps:

      Note

      Alternatively, use the cluster add-ons approach to install the EDP components.

      After setting up the cluster and installing EDP components according to the scenario, proceed to the EDP installation.


        Integration With Tekton⚓︎

ReportPortal integration with Tekton allows managing all automation results and reports in one place, visualizing metrics and analytics, and collaborating as a team on statistics results.

        For integration, take the following steps:

        1. Log in to the ReportPortal console and navigate to the User Profile menu:

  ReportPortal profile

2. Copy the Access token and use it as a value when creating a Kubernetes secret for the ReportPortal credentials:

  apiVersion: v1
  kind: Secret
  type: Opaque
  metadata:
    name: rp-credentials    # assumed secret name; the remaining fields are elided in this excerpt
    namespace: edp
  stringData:
    token: <access-token>   # illustrative key holding the token copied above

  In the pipeline definition, the ReportPortal task is then wired to the shared workspace, for example:

  workspaces:
    - name: source
      workspace: shared-workspace
        3. Launch your Tekton pipeline and check that the custom task has been successfully executed:

  Tekton task successfully executed

        4. Test reports will be displayed in the Launches section of the ReportPortal:

  Test report results


      Keycloak Integration⚓︎

Follow the steps below to integrate ReportPortal with Keycloak.

      Info

      It is also possible to install ReportPortal using the cluster add-ons. For details, please refer to the Install via Add-Ons page.

      Prerequisites⚓︎

      • Keycloak is installed. For detailed instructions, please refer to the provided documentation.
      • ReportPortal is installed. To install it using the Helm Chart, please follow the instructions outlined in the corresponding documentation.

      Keycloak Configuration⚓︎

      1. Navigate to Client Scopes > Create client scope and create a new scope with the SAML protocol type.

      2. Navigate to Client Scopes > your_scope_name > Mappers > Configure a new mapper > select the User Attribute mapper type. Add three mappers for the email, first name, and last name by typing lastName, firstName, and email in the User Attribute field:

        • Name is a display name in Keycloak.
        • User Attribute is a user property for mapping.
        • SAML Attribute Name is an attribute used for requesting information in the ReportPortal configuration.
        • SAML Attribute NameFormat: Basic.
        • Aggregate attribute values: Off.

  User mapper sample
  Scope mappers

      3. Navigate to Clients > Create client and fill in the following fields:

        • Client type: SAML.
        • Client ID: report.portal.sp.id.

        Warning

        The report.portal.sp.id Client ID is a constant value.

4. Navigate to Client > your_client > Settings and add https://<report-portal-url>/* to the Valid redirect URIs.

      5. Navigate to Client > your_client > Keys and disable Client signature required.

  Client keys

6. Navigate to Client > your_client > Client scopes and add the scope created earlier with the default Assigned type.

  Client scopes

      ReportPortal Configuration⚓︎

1. Log in to the ReportPortal with admin permissions.

      2. Navigate to Client > Administrate > Plugins and select the SAML plugin.

  Plugins menu

      3. To add a new integration, fill in the following fields:

  Add SAML configuration

        • Provider name is the display name in the ReportPortal login page.
• Metadata URL: https://<keycloak_url>/auth/realms/<realm>/protocol/saml/descriptor.
        • Email is the value from the SAML Attribute Name field in the Keycloak mapper.
• RP callback URL: https://<report_portal_url>/uat.
        • Name attributes mode is the first & last name (type based on your mapper).
        • First name is the value from the SAML Attribute Name field in the Keycloak mapper.
        • Last name is the value from the SAML Attribute Name field in the Keycloak mapper.
      4. Log in to the ReportPortal.

        Note

        By default, after the first login, ReportPortal creates the <your_email>_personal project and adds an account with the Project manager role.

  Report portal login page


      Restore EDP Tenant With Velero⚓︎

You can use the Velero tool to restore an EDP tenant. Explore the main steps for backup and restoration below.

1. Delete all related entities in Keycloak: the realm and clients from the master/openshift realms. Navigate to the entities list in Keycloak, select the necessary ones, and click the deletion icon on the entity overview page. If there are customized configs in Keycloak, save them before making a backup.

  Remove keycloak realm

      2. To restore EDP, install and configure the Velero tool. Please refer to the Install Velero documentation for details.
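  Once Velero is configured and a backup of the tenant exists, the restore itself is typically a single command (the backup name is a placeholder):

  velero restore create --from-backup <backup-name>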

3. Remove all locks for operators. Delete all config maps that have <OPERATOR_NAME>-operator-lock names. Then restart all pods with operators, or simply run the following command:

        kubectl -n edp delete cm $(kubectl -n edp get cm | grep 'operator-lock' | awk '{print $1}')
4. Recreate the admin password and delete the Jenkins pod, or change the script to update the admin password in Jenkins every time the pod is updated.


      Schedule Pods Restart⚓︎

      In case it is necessary to restart pods, use a CronJob according to the following template:

      View: template
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: apps-restart   # assumed name
# rules granting get/list on deployments and statefulsets plus patch for rollout restart,
# as well as the ServiceAccount and RoleBinding, are elided in this excerpt
---
kind: CronJob
apiVersion: batch/v1
metadata:
  name: apps-restart
spec:
  schedule: "0 9 * * *"   # assumed schedule; see the note below
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: restart
              image: bitnami/kubectl   # assumed image providing kubectl
              command: ["/bin/sh"]
              args:
                - -c
                - kubectl get -n <NAMESPACE> -o name deployment,statefulset | grep <NAME_PATTERN> | xargs kubectl -n <NAMESPACE> rollout restart
          restartPolicy: Never

      Modify the Cron expression in the CronJob manifest if needed.


      SonarQube Project Visibility⚓︎

      This documentation serves as a detailed guide on configuring access rights within SonarQube projects. It is primarily aimed at ensuring that only authorized users can view and interact with the projects hosted on the SonarQube platform. The guide is structured to assist both new and existing SonarQube projects in managing their visibility settings effectively.

      Upon logging into SonarQube through the OpenID Connect mechanism, users are automatically assigned to the sonar-users group, granting them access to all projects. However, this document outlines methods to customize these default settings to enhance security and privacy. It is divided into two main sections: one focusing on restricting access for the new projects and the other on configuring access for the existing projects.

      Restrict Access for New Projects⚓︎

      In its default configuration, SonarQube does not restrict access to newly created projects, making them accessible to all instance users. To modify this behavior and set new projects to private by default, follow these instructions:

      1. Open the SonarQube UI in the browser.

      2. Navigate to the Administration tab:

  SonarQube Administration tab

        Note

        Ensure you have admin rights to see the Administration section.

      3. Click the Projects button and select Management:

  Projects management

4. On the project management page, click the pencil icon at the top-right corner:

  Project management page

      5. Select Private and click Change Default Visibility:

  Change Default Visibility dialog

      Configure Access for Existing Projects⚓︎

      To make all the current projects private, follow the steps below:

      1. In the Projects tab, enter the project you want to make private.

2. On the project page, click the Project Settings button and select Permissions:

  Project Settings menu

      3. In the project permissions page, select Private:

  Project permissions page

      4. Repeat the procedure for all of the projects you want to make private.

      \ No newline at end of file + SonarQube Project Visibility - EPAM Delivery Platform

      SonarQube Project Visibility⚓︎

      This documentation serves as a detailed guide on configuring access rights within SonarQube projects. It is primarily aimed at ensuring that only authorized users can view and interact with the projects hosted on the SonarQube platform. The guide is structured to assist both new and existing SonarQube projects in managing their visibility settings effectively.

      Upon logging into SonarQube through the OpenID Connect mechanism, users are automatically assigned to the sonar-users group, granting them access to all projects. However, this document outlines methods to customize these default settings to enhance security and privacy. It is divided into two main sections: one focusing on restricting access for the new projects and the other on configuring access for the existing projects.

      Restrict Access for New Projects⚓︎

      In its default configuration, SonarQube does not restrict access to newly created projects, making them accessible to all instance users. To modify this behavior and set new projects to private by default, follow these instructions:

      1. Open the SonarQube UI in the browser.

      2. Navigate to the Administration tab:

        Nexus user settings
        Nexus user settings

        Note

        Ensure you have admin rights to see the Administration section.

      3. Click the Projects button and select Management:

        Nexus user settings
        Nexus user settings

4. On the project management page, click the pencil icon at the top-right corner:

        Nexus user settings
        Nexus user settings

      5. Select Private and click Change Default Visibility:

        Nexus user settings
        Nexus user settings
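  If you prefer to automate this step, the default visibility can presumably also be set through the SonarQube Web API. A minimal sketch, assuming an admin token and the example host used elsewhere in this documentation:

  curl -u <admin-token>: -X POST "https://sonarqube.example.com/api/projects/update_default_visibility?projectVisibility=private"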

      Configure Access for Existing Projects⚓︎

      To make all the current projects private, follow the steps below:

      1. In the Projects tab, enter the project you want to make private.

2. In the project page, click the Project Settings button and select Permissions:

        Nexus user settings
        Nexus user settings

      3. In the project permissions page, select Private:

        Nexus user settings
        Nexus user settings

      4. Repeat the procedure for all of the projects you want to make private.
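  To avoid clicking through every project manually, each project can presumably also be switched via the SonarQube Web API. A minimal sketch, assuming an admin token and a known project key:

  curl -u <admin-token>: -X POST "https://sonarqube.example.com/api/projects/update_visibility?project=<project-key>&visibility=private"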

      \ No newline at end of file diff --git a/operator-guide/sonarqube/index.html b/operator-guide/sonarqube/index.html index 538861383..d0662004f 100644 --- a/operator-guide/sonarqube/index.html +++ b/operator-guide/sonarqube/index.html @@ -1,4 +1,4 @@ - SonarQube Integration - EPAM Delivery Platform

      SonarQube Integration⚓︎

      This documentation guide provides comprehensive instructions for integrating SonarQube with the EPAM Delivery Platform.

      Info

In EDP release 3.5, we have changed the deployment strategy for the sonarqube-operator component; it is no longer installed by default. The sonarURL parameter management has been transferred from the values.yaml file to Kubernetes secrets.

      Prerequisites⚓︎

      Before proceeding, ensure that you have the following prerequisites:

      • Kubectl version 1.26.0 is installed.
      • Helm version 3.12.0+ is installed.

      Installation⚓︎

To install SonarQube with pre-defined templates, use the sonar-operator installed via the Cluster Add-Ons approach.

      Configuration⚓︎

      To establish robust authentication and precise access control, generating a SonarQube token is essential. This token is a distinct identifier, enabling effortless integration between SonarQube and EDP. To generate the SonarQube token, proceed with the following steps:

1. Open the SonarQube UI and navigate to Administration -> Security -> Users. Create a new user or select an existing one. Click the Options List icon to create a token:

        SonarQube user settings
        SonarQube user settings

      2. Type the ci-user username, define an expiration period, and click the Generate button to create the token:

        SonarQube create token
        SonarQube create token

      3. Click the Copy button to copy the generated <Sonarqube-token>:

        SonarQube token
        SonarQube token

4. Provision secrets using a manifest, the EDP Portal, or the External Secrets Operator:

      Go to EDP Portal -> EDP -> Configuration -> SonarQube. Update or fill in the URL and Token fields and click the Save button:

      SonarQube update manual secret
      SonarQube update manual secret

      apiVersion: v1
      + SonarQube Integration - EPAM Delivery Platform      

      SonarQube Integration⚓︎

      This documentation guide provides comprehensive instructions for integrating SonarQube with the EPAM Delivery Platform.

      Info

In EDP release 3.5, we have changed the deployment strategy for the sonarqube-operator component; it is no longer installed by default. The sonarURL parameter management has been transferred from the values.yaml file to Kubernetes secrets.

      Prerequisites⚓︎

      Before proceeding, ensure that you have the following prerequisites:

      • Kubectl version 1.26.0 is installed.
      • Helm version 3.12.0+ is installed.

      Installation⚓︎

To install SonarQube with pre-defined templates, use the sonar-operator installed via the Cluster Add-Ons approach.

      Configuration⚓︎

      To establish robust authentication and precise access control, generating a SonarQube token is essential. This token is a distinct identifier, enabling effortless integration between SonarQube and EDP. To generate the SonarQube token, proceed with the following steps:

1. Open the SonarQube UI and navigate to Administration -> Security -> Users. Create a new user or select an existing one. Click the Options List icon to create a token:

        SonarQube user settings
        SonarQube user settings

      2. Type the ci-user username, define an expiration period, and click the Generate button to create the token:

        SonarQube create token
        SonarQube create token

      3. Click the Copy button to copy the generated <Sonarqube-token>:

        SonarQube token
        SonarQube token

4. Provision secrets using a manifest, the EDP Portal, or the External Secrets Operator:

      Go to EDP Portal -> EDP -> Configuration -> SonarQube. Update or fill in the URL and Token fields and click the Save button:

      SonarQube update manual secret
      SonarQube update manual secret

      apiVersion: v1
       kind: Secret
       metadata:
         name: ci-sonarqube
      @@ -15,4 +15,4 @@
         "url": "https://sonarqube.example.com",
         "token": "XXXXXXXXXXXX"
       },
      -

      Go to EDP Portal -> EDP -> Configuration -> SonarQube and see the Managed by External Secret message:

      SonarQube managed by external secret operator
      SonarQube managed by external secret operator

      More details about External Secrets Operator integration can be found in the External Secrets Operator Integration page.

      \ No newline at end of file +

      Go to EDP Portal -> EDP -> Configuration -> SonarQube and see the Managed by External Secret message:

      SonarQube managed by external secret operator
      SonarQube managed by external secret operator

      More details about External Secrets Operator integration can be found in the External Secrets Operator Integration page.
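Alternatively, the same secret can be created directly with kubectl. A minimal sketch, assuming the ci-sonarqube secret name and the edp namespace used throughout this guide:

kubectl create secret generic ci-sonarqube -n edp \
  --from-literal=url=https://sonarqube.example.com \
  --from-literal=token=<Sonarqube-token>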

      \ No newline at end of file diff --git a/operator-guide/ssl-automation-okd/index.html b/operator-guide/ssl-automation-okd/index.html index 004fb457b..6a3dcd581 100644 --- a/operator-guide/ssl-automation-okd/index.html +++ b/operator-guide/ssl-automation-okd/index.html @@ -1,4 +1,4 @@ - Use Cert-Manager in OpenShift - EPAM Delivery Platform

      Use Cert-Manager in OpenShift⚓︎

      The following material covers Let's Encrypt certificate automation with cert-manager using AWS Route53.

The cert-manager is a Kubernetes/OpenShift operator that allows issuing and automatically renewing SSL certificates. This tutorial demonstrates the steps to secure a DNS name.

      Below is an instruction on how to automatically issue and install wildcard certificates on OpenShift Ingress Controller and API Server covering all cluster Routes. To secure separate OpenShift Routes, please refer to the OpenShift Route Support project for cert-manager.

      Prerequisites⚓︎

      • The cert-manager;
      • OpenShift v4.7 - v4.11;
      • Connection to the OpenShift Cluster;
      • Enabled AWS IRSA;
      • The latest oc utility. The kubectl tool can also be used for most of the commands.

      Install Cert-Manager Operator⚓︎

      Install the cert-manager operator via OpenShift OperatorHub that uses Operator Lifecycle Manager (OLM):

      1. Go to the OpenShift Admin Console → OperatorHub, search for the cert-manager, and click Install:

        Cert-Manager Installation
        Cert-Manager Installation

2. Modify the ClusterServiceVersion OLM resource by selecting Update approval → Manual. If Update approval → Automatic is selected, the parameters in the ClusterServiceVersion will be reset to default after an automatic operator update.

        Note

Installing an operator with Manual approval causes all operators installed in the openshift-operators namespace to use the Manual approval strategy. If Manual approval is chosen, review the manual installation plan and approve it.

        Cert-Manager Installation
        Cert-Manager Installation

3. Navigate to Operators → Installed Operators and check that the operator status is Succeeded:

        Cert-Manager Installation
        Cert-Manager Installation

      4. In case of errors, troubleshoot the Operator issues:

        oc describe operator cert-manager -n openshift-operators
        + Use Cert-Manager in OpenShift - EPAM Delivery Platform      

        Use Cert-Manager in OpenShift⚓︎

        The following material covers Let's Encrypt certificate automation with cert-manager using AWS Route53.

The cert-manager is a Kubernetes/OpenShift operator that allows issuing and automatically renewing SSL certificates. This tutorial demonstrates the steps to secure a DNS name.

        Below is an instruction on how to automatically issue and install wildcard certificates on OpenShift Ingress Controller and API Server covering all cluster Routes. To secure separate OpenShift Routes, please refer to the OpenShift Route Support project for cert-manager.

        Prerequisites⚓︎

        • The cert-manager;
        • OpenShift v4.7 - v4.11;
        • Connection to the OpenShift Cluster;
        • Enabled AWS IRSA;
        • The latest oc utility. The kubectl tool can also be used for most of the commands.

        Install Cert-Manager Operator⚓︎

        Install the cert-manager operator via OpenShift OperatorHub that uses Operator Lifecycle Manager (OLM):

        1. Go to the OpenShift Admin Console → OperatorHub, search for the cert-manager, and click Install:

          Cert-Manager Installation
          Cert-Manager Installation

2. Modify the ClusterServiceVersion OLM resource by selecting Update approval → Manual. If Update approval → Automatic is selected, the parameters in the ClusterServiceVersion will be reset to default after an automatic operator update.

          Note

Installing an operator with Manual approval causes all operators installed in the openshift-operators namespace to use the Manual approval strategy. If Manual approval is chosen, review the manual installation plan and approve it.

          Cert-Manager Installation
          Cert-Manager Installation

3. Navigate to Operators → Installed Operators and check that the operator status is Succeeded:

          Cert-Manager Installation
          Cert-Manager Installation

        4. In case of errors, troubleshoot the Operator issues:

          oc describe operator cert-manager -n openshift-operators
           oc describe sub cert-manager -n openshift-operators
           

        Create AWS Role for Route53⚓︎

        The cert-manager should be configured to validate Wildcard certificates using the DNS-based method.

        1. Check the DNS Hosted zone ID in AWS Route53 for your domain.

          Hosted Zone ID
          Hosted Zone ID

        2. Create Route53 Permissions policy in AWS for cert-manager to be able to create DNS TXT records for the certificate validation. In this example, cert-manager permissions are given for a particular DNS zone only. Replace Hosted zone ID XXXXXXXX in the "Resource": "arn:aws:route53:::hostedzone/XXXXXXXXXXXX".

          {
             "Version": "2012-10-17",
          @@ -241,4 +241,4 @@
           

Since this field will be absent in the kubeconfig file, the system root SSL certificates will be used to validate the cluster certificate trust chain. On Ubuntu, Let's Encrypt OpenShift cluster certificates will be validated against the Internet Security Research Group root in /etc/ssl/certs/ca-certificates.crt.

          Certificate Renewals⚓︎

          The cert-manager automatically renews the certificates based on the X.509 certificate's duration and the renewBefore value. The minimum value for the spec.duration is 1 hour; for spec.renewBefore, 5 minutes. It is also required that spec.duration > spec.renewBefore.
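For illustration, a minimal Certificate sketch with these fields set (the resource names, DNS name, and issuer are assumptions for this example):

apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: example-wildcard-cert
  namespace: openshift-ingress
spec:
  secretName: example-wildcard-cert-tls
  duration: 2160h    # 90 days
  renewBefore: 360h  # renew 15 days before expiry
  dnsNames:
    - "*.apps.example.com"
  issuerRef:
    name: letsencrypt-production
    kind: ClusterIssuer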

          Use the cmctl tool to manually trigger a single instant certificate renewal:

          cmctl renew router-certs -n openshift-ingress
           cmctl renew api-certs -n openshift-config
           

Alternatively, manually renew all certificates in all namespaces with the app=cert-manager label:

          cmctl renew --all-namespaces -l app=cert-manager
          -

          Run the cmctl renew --help command to get more details.

        \ No newline at end of file +

        Run the cmctl renew --help command to get more details.

      \ No newline at end of file diff --git a/operator-guide/tekton-monitoring/index.html b/operator-guide/tekton-monitoring/index.html index 08b08b6c0..fd2794e93 100644 --- a/operator-guide/tekton-monitoring/index.html +++ b/operator-guide/tekton-monitoring/index.html @@ -1,4 +1,4 @@ - Monitoring - EPAM Delivery Platform

      Monitoring⚓︎

This documentation describes how to integrate tekton-pipelines metrics with the Prometheus and Grafana monitoring stack.

      Prerequisites⚓︎

Ensure the following requirements are met before moving ahead:

      Create and Apply the Additional Scrape Config⚓︎

      To create and apply the additional scrape config, follow the steps below:

1. Create the Kubernetes secret file with the additional scrape config:

        additional-scrape-configs.yaml file
        apiVersion: v1
        + Monitoring - EPAM Delivery Platform      

        Monitoring⚓︎

This documentation describes how to integrate tekton-pipelines metrics with the Prometheus and Grafana monitoring stack.

        Prerequisites⚓︎

Ensure the following requirements are met before moving ahead:

        Create and Apply the Additional Scrape Config⚓︎

        To create and apply the additional scrape config, follow the steps below:

1. Create the Kubernetes secret file with the additional scrape config:

          additional-scrape-configs.yaml file
          apiVersion: v1
           kind: Secret
           metadata:
             name: additional-scrape-configs
          @@ -16,4 +16,4 @@
                 enabled: true
                 name: additional-scrape-configs
                 key: prometheus-additional-job.yaml
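  For reference, a minimal sketch of what the prometheus-additional-job.yaml payload from step 1 might contain (the job name, target service, and metrics port are assumptions; verify them against your Tekton installation):

  - job_name: tekton-pipelines
    scrape_interval: 30s
    static_configs:
      - targets:
          - tekton-pipelines-controller.tekton-pipelines.svc.cluster.local:9090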
          -
2. Download the KubeRocketCI dashboard:

          Import dashboard grafana
          Import Grafana dashboard

          a. Click on the dashboard menu;

          b. In the dropdown menu, click the + Import button;

          c. Select the created 18321_rev*.json file;

          Import dashboard grafana options
          Import Grafana dashboard: Options

          d. Type the name of the dashboard;

          e. Select the folder for the dashboard;

          f. Type the UID (set of eight numbers or letters and symbols);

          g. Click the Import button.

As soon as the dashboard procedure is completed, you can track the incoming metrics in the dashboard menu:

        Tekton dashboard
        Tekton dashboard

        \ No newline at end of file +
2. Download the KubeRocketCI dashboard:

        Import dashboard grafana
        Import Grafana dashboard

        a. Click on the dashboard menu;

        b. In the dropdown menu, click the + Import button;

        c. Select the created 18321_rev*.json file;

        Import dashboard grafana options
        Import Grafana dashboard: Options

        d. Type the name of the dashboard;

        e. Select the folder for the dashboard;

        f. Type the UID (set of eight numbers or letters and symbols);

        g. Click the Import button.

As soon as the dashboard procedure is completed, you can track the incoming metrics in the dashboard menu:

      Tekton dashboard
      Tekton dashboard

      \ No newline at end of file diff --git a/operator-guide/tekton-overview/index.html b/operator-guide/tekton-overview/index.html index 5e43cec0d..8824e93fd 100644 --- a/operator-guide/tekton-overview/index.html +++ b/operator-guide/tekton-overview/index.html @@ -1 +1 @@ - Overview - EPAM Delivery Platform

      Tekton Overview⚓︎

      EPAM Delivery Platform provides Continuous Integration based on Tekton.

      Tekton is an open-source Kubernetes native framework for creating CI pipelines, allowing a user to compile, build and test applications.

      The edp-tekton GitHub repository provides all Tekton implementation logic on the platform. The Helm charts are used to deploy the resources inside the Kubernetes cluster. Tekton logic is decoupled into separate components:

      Edp-tekton components diagram
      Edp-tekton components diagram

      The diagram above describes the following:

      • Common-library is the Helm chart of Library type which stores the common logic shareable across all Tekton pipelines. This library contains Helm templates that generate common Tekton resources.
      • Pipelines-library is the Helm chart of the Application type which stores the core logic for the EDP pipelines. Tekton CRs like Pipelines, Tasks, EventListeners, Triggers, TriggerTemplates, and other resources are delivered with this chart.
      • Custom-pipelines is the Helm chart of the Application type which implements custom logic running specifically for internal EDP development, for example, CI and Release. It also demonstrates the customization flow on the platform.
      • Tekton-dashboard is a multitenancy-adopted implementation of the upstream Tekton Dashboard. It is configured to share Tekton resources across a single namespace.
• EDP Interceptor is the custom Tekton Interceptor which enriches the payload from VCS events with EDP data from the Codebase CR specification. This data is used to define the Pipeline logic.

      Inspect the schema below that describes the logic behind the Tekton functionality on the platform:

      Component view for the Tekton on EDP
      Component view for the Tekton on EDP

      The platform logic consists of the following:

      1. The EventListener exposes a dedicated Pod that runs the sink logic and receives incoming events from the VCSs (Gerrit, GitHub, GitLab) through the Ingress. It contains triggers with filtering and routing rules for incoming requests.

      2. Upon the Event Payload arrival, the EventListener runs triggers to process information or validate it via different interceptors.

3. The EDP Interceptor extracts information from the codebases.v2.edp.epam.com CR and injects the received data into the top-level 'extensions' field of the Event Payload. The Interceptor consists of a running Pod and a Service.

      4. The Tekton Cel Interceptor does simple transformations of the resulting data and prepares them for the Pipeline parameters substitution.

      5. The TriggerTemplate creates a PipelineRun instance with the required parameters extracted from the Event Payload by Interceptors. These parameters are mandatory for Pipelines.

      6. The PipelineRun has a mapping to the EDP Tekton Pipelines using a template approach which reduces code duplication. Each Pipeline is designed for a specific VCS (Gerrit, GitLab, GitHub), technology stack (such as Java or Python), and type (code-review, build).

      7. A Pipeline consists of separate EDP Tekton or open-source Tasks. They are arranged in a specific order of execution in the Pipeline.

8. Each Task is executed as a Pod on the Kubernetes cluster. Also, Tasks can have a different number of steps that are executed as containers in the Pod.

      9. The Kubernetes native approach allows the creation of PipelineRun either with the kubectl tool or using the EDP Portal UI.

      \ No newline at end of file + Overview - EPAM Delivery Platform

      Tekton Overview⚓︎

      EPAM Delivery Platform provides Continuous Integration based on Tekton.

      Tekton is an open-source Kubernetes native framework for creating CI pipelines, allowing a user to compile, build and test applications.

      The edp-tekton GitHub repository provides all Tekton implementation logic on the platform. The Helm charts are used to deploy the resources inside the Kubernetes cluster. Tekton logic is decoupled into separate components:

      Edp-tekton components diagram
      Edp-tekton components diagram

      The diagram above describes the following:

      • Common-library is the Helm chart of Library type which stores the common logic shareable across all Tekton pipelines. This library contains Helm templates that generate common Tekton resources.
      • Pipelines-library is the Helm chart of the Application type which stores the core logic for the EDP pipelines. Tekton CRs like Pipelines, Tasks, EventListeners, Triggers, TriggerTemplates, and other resources are delivered with this chart.
      • Custom-pipelines is the Helm chart of the Application type which implements custom logic running specifically for internal EDP development, for example, CI and Release. It also demonstrates the customization flow on the platform.
      • Tekton-dashboard is a multitenancy-adopted implementation of the upstream Tekton Dashboard. It is configured to share Tekton resources across a single namespace.
• EDP Interceptor is the custom Tekton Interceptor which enriches the payload from VCS events with EDP data from the Codebase CR specification. This data is used to define the Pipeline logic.

      Inspect the schema below that describes the logic behind the Tekton functionality on the platform:

      Component view for the Tekton on EDP
      Component view for the Tekton on EDP

      The platform logic consists of the following:

      1. The EventListener exposes a dedicated Pod that runs the sink logic and receives incoming events from the VCSs (Gerrit, GitHub, GitLab) through the Ingress. It contains triggers with filtering and routing rules for incoming requests.

      2. Upon the Event Payload arrival, the EventListener runs triggers to process information or validate it via different interceptors.

3. The EDP Interceptor extracts information from the codebases.v2.edp.epam.com CR and injects the received data into the top-level 'extensions' field of the Event Payload. The Interceptor consists of a running Pod and a Service.

      4. The Tekton Cel Interceptor does simple transformations of the resulting data and prepares them for the Pipeline parameters substitution.

      5. The TriggerTemplate creates a PipelineRun instance with the required parameters extracted from the Event Payload by Interceptors. These parameters are mandatory for Pipelines.

      6. The PipelineRun has a mapping to the EDP Tekton Pipelines using a template approach which reduces code duplication. Each Pipeline is designed for a specific VCS (Gerrit, GitLab, GitHub), technology stack (such as Java or Python), and type (code-review, build).

      7. A Pipeline consists of separate EDP Tekton or open-source Tasks. They are arranged in a specific order of execution in the Pipeline.

8. Each Task is executed as a Pod on the Kubernetes cluster. Also, Tasks can have a different number of steps that are executed as containers in the Pod.

      9. The Kubernetes native approach allows the creation of PipelineRun either with the kubectl tool or using the EDP Portal UI.
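For example, a minimal PipelineRun sketch that could be submitted with kubectl (the name, namespace, and referenced pipeline are assumptions for illustration):

apiVersion: tekton.dev/v1
kind: PipelineRun
metadata:
  generateName: demo-build-
  namespace: edp
spec:
  pipelineRef:
    name: <pipeline-name>

Saved as pipelinerun.yaml, it could be submitted with kubectl create -f pipelinerun.yaml (kubectl create is used here because generateName is not supported by kubectl apply).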

      \ No newline at end of file diff --git a/operator-guide/troubleshooting/application-not-built/index.html b/operator-guide/troubleshooting/application-not-built/index.html index 957d6e4c7..85bc9147b 100644 --- a/operator-guide/troubleshooting/application-not-built/index.html +++ b/operator-guide/troubleshooting/application-not-built/index.html @@ -1 +1 @@ - Codebase Build Process is Failed - EPAM Delivery Platform

      Codebase Build Process is Failed⚓︎

      Problem⚓︎

The application build fails.

      Tree diagram window
      Tree diagram window

      Cause⚓︎

Code quality checks may have failed in SonarQube.

      Solution⚓︎

1. Navigate to your application in the KubeRocketCI portal.

      2. In the build history section, click the failed pipeline run name to open the pipeline run in Tekton.

      3. Open the sonar-scanner step:

If the quality check is insufficient, it means that SonarQube rated the quality of your codebase as low. You need to refine the code to pass the sonar-scanner step.

        Failed sonar-scanner step
        Failed sonar-scanner step

4. Check that the SonarQube project has been created in the integrated SonarQube instance.

5. Once the SonarQube project is created, build the application again.

        If you have set up SonarCloud for monitoring code quality, be aware that the initial build of the codebase is likely to fail. Following this failed pipeline run, you should see a new project created within the organization. Once this happens, establish a Quality Gate in SonarCloud subsequent to the initial pipeline execution, and then initiate the build pipeline again. This procedure is described in detail in the Create Application page.

      \ No newline at end of file + Codebase Build Process is Failed - EPAM Delivery Platform

      Codebase Build Process is Failed⚓︎

      Problem⚓︎

The application build fails.

      Tree diagram window
      Tree diagram window

      Cause⚓︎

Code quality checks may have failed in SonarQube.

      Solution⚓︎

1. Navigate to your application in the KubeRocketCI portal.

      2. In the build history section, click the failed pipeline run name to open the pipeline run in Tekton.

      3. Open the sonar-scanner step:

If the quality check is insufficient, it means that SonarQube rated the quality of your codebase as low. You need to refine the code to pass the sonar-scanner step.

        Failed sonar-scanner step
        Failed sonar-scanner step

4. Check that the SonarQube project has been created in the integrated SonarQube instance (a quick verification sketch is shown after this list).

5. Once the SonarQube project is created, build the application again.

        If you have set up SonarCloud for monitoring code quality, be aware that the initial build of the codebase is likely to fail. Following this failed pipeline run, you should see a new project created within the organization. Once this happens, establish a Quality Gate in SonarCloud subsequent to the initial pipeline execution, and then initiate the build pipeline again. This procedure is described in detail in the Create Application page.
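  As a quick way to perform the verification from step 4, the project's existence can presumably be checked through the SonarQube Web API. A minimal sketch, assuming an admin token and the example host:

  curl -u <admin-token>: "https://sonarqube.example.com/api/projects/search?projects=<project-key>"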

      \ No newline at end of file diff --git a/operator-guide/troubleshooting/invalid-codebase-name/index.html b/operator-guide/troubleshooting/invalid-codebase-name/index.html index c311748a4..3111f8af4 100644 --- a/operator-guide/troubleshooting/invalid-codebase-name/index.html +++ b/operator-guide/troubleshooting/invalid-codebase-name/index.html @@ -1,2 +1,2 @@ - Invalid Codebase ID Issue (GitHub/GitLab VCS) - EPAM Delivery Platform

      Invalid Codebase ID Issue (GitHub/GitLab VCS)⚓︎

      Problem⚓︎

Users face an unexpected "invalid project ID" error message when creating a codebase:

      Invalid project ID error
      Invalid project ID error

      Cause⚓︎

The root cause of this issue lies in incorrectly setting the repository relative path. Users may overlook the requirement to prefix the repository name with the account or organization name when working with GitHub or GitLab version control systems. This oversight leads to the system interpreting the project ID as invalid.

      Solution⚓︎

      To solve this issue, follow the steps below:

      1. Delete the codebase you failed to create.

      2. Create a new one but with the correct Git repository relative path format specified.

        When creating a new codebase, ensure that the Git repository relative path is formatted correctly. This entails prefixing the repository name with the respective account or organization name. For example:

        account_or_organisation_name/repository_name
        -

        Please refer to the Add Application page for more details.

3. Check if the problem disappeared. It may take several minutes for the codebase to update its status.

        After creating the new codebase with the corrected repository path, check if the problem persists. It's important to note that it may take a few minutes for the codebase to update its status. Once the codebase is synced, ensure that the "invalid project ID" error no longer occurs:

        Codebase created
        Codebase created

      \ No newline at end of file + Invalid Codebase ID Issue (GitHub/GitLab VCS) - EPAM Delivery Platform

      Invalid Codebase ID Issue (GitHub/GitLab VCS)⚓︎

      Problem⚓︎

Users face an unexpected "invalid project ID" error message when creating a codebase:

      Invalid project ID error
      Invalid project ID error

      Cause⚓︎

The root cause of this issue lies in incorrectly setting the repository relative path. Users may overlook the requirement to prefix the repository name with the account or organization name when working with GitHub or GitLab version control systems. This oversight leads to the system interpreting the project ID as invalid.

      Solution⚓︎

      To solve this issue, follow the steps below:

      1. Delete the codebase you failed to create.

      2. Create a new one but with the correct Git repository relative path format specified.

        When creating a new codebase, ensure that the Git repository relative path is formatted correctly. This entails prefixing the repository name with the respective account or organization name. For example:

        account_or_organisation_name/repository_name
        +

        Please refer to the Add Application page for more details.

3. Check if the problem disappeared. It may take several minutes for the codebase to update its status.

        After creating the new codebase with the corrected repository path, check if the problem persists. It's important to note that it may take a few minutes for the codebase to update its status. Once the codebase is synced, ensure that the "invalid project ID" error no longer occurs:

        Codebase created
        Codebase created

      \ No newline at end of file diff --git a/operator-guide/troubleshooting/overview/index.html b/operator-guide/troubleshooting/overview/index.html index aaff6e90b..aad576393 100644 --- a/operator-guide/troubleshooting/overview/index.html +++ b/operator-guide/troubleshooting/overview/index.html @@ -1 +1 @@ - Overview - EPAM Delivery Platform

      Troubleshooting Guide Overview⚓︎

      Welcome to the Troubleshooting Guide for the EPAM Delivery Platform. Here, we offer essential information to assist you with the challenges you may encounter while using the platform. This guide is designed to address common issues and answer frequently asked questions, aiming to streamline your troubleshooting process and enhance your experience. Whether you're dealing with deployment setbacks, authentication hurdles, or configuration complexities, you'll find helpful insights and solutions here.

      Currently, this section covers the following issues:

      \ No newline at end of file + Overview - EPAM Delivery Platform

      Troubleshooting Guide Overview⚓︎

      Welcome to the Troubleshooting Guide for the EPAM Delivery Platform. Here, we offer essential information to assist you with the challenges you may encounter while using the platform. This guide is designed to address common issues and answer frequently asked questions, aiming to streamline your troubleshooting process and enhance your experience. Whether you're dealing with deployment setbacks, authentication hurdles, or configuration complexities, you'll find helpful insights and solutions here.

      Currently, this section covers the following issues:

      \ No newline at end of file diff --git a/operator-guide/troubleshooting/resource-observability/index.html b/operator-guide/troubleshooting/resource-observability/index.html index 33118dc1d..8d251b2c8 100644 --- a/operator-guide/troubleshooting/resource-observability/index.html +++ b/operator-guide/troubleshooting/resource-observability/index.html @@ -1,4 +1,4 @@ - Resource Observability Issue - EPAM Delivery Platform

      Resource Observability Issue⚓︎

      Problem⚓︎

      Users can't see resources within the KubeRocketCI dedicated namespace via KubeRocketCI portal:

      Resource observability issue
      Resource observability issue

      Cause⚓︎

The problem might be caused by several factors. First of all, the default and allowed namespaces may be either unset or set incorrectly. Secondly, the service account used for browsing the KubeRocketCI portal may have an insufficient permission set. Thirdly, if Keycloak is used as an authentication mechanism, the problem might be related to improper group membership.

      Solution⚓︎

The solution can vary depending on the way users log into the platform, whether it is a service account token or an OpenID Connect mechanism.

      Solution 1 (Service Account Token)⚓︎

      This solution suits those who use a service account token to log into the KubeRocketCI portal.

      To fix the problem, follow the steps below:

1. Check both the default and allowed namespaces by navigating to the KubeRocketCI portal -> Account Settings -> Cluster.

      2. Check access rights of the service account at your disposal using the kubectl describe command:

kubectl describe serviceaccount <service-account-name> -n edp
        + Resource Observability Issue - EPAM Delivery Platform      

        Resource Observability Issue⚓︎

        Problem⚓︎

        Users can't see resources within the KubeRocketCI dedicated namespace via KubeRocketCI portal:

        Resource observability issue
        Resource observability issue

        Cause⚓︎

The problem might be caused by several factors. First of all, the default and allowed namespaces may be either unset or set incorrectly. Secondly, the service account used for browsing the KubeRocketCI portal may have an insufficient permission set. Thirdly, if Keycloak is used as an authentication mechanism, the problem might be related to improper group membership.

        Solution⚓︎

The solution can vary depending on the way users log into the platform, whether it is a service account token or an OpenID Connect mechanism.

        Solution 1 (Service Account Token)⚓︎

        This solution suits those who use a service account token to log into the KubeRocketCI portal.

        To fix the problem, follow the steps below:

1. Check both the default and allowed namespaces by navigating to the KubeRocketCI portal -> Account Settings -> Cluster.

        2. Check access rights of the service account at your disposal using the kubectl describe command:

kubectl describe serviceaccount <service-account-name> -n edp
           
3. Check the role binding associated with the account:

kubectl describe rolebinding <role-binding-name> -n edp
           
        4. Check permissions of the role used in the role binding:

kubectl describe role <role-name> -n edp
          -
5. Adjust the resources to have a sufficient permission set. Refer to the KubeRocketCI Access Model page for more details about permissions required for proper resource visibility.

        Solution 2 (Keycloak)⚓︎

This solution suits those who log into the KubeRocketCI portal via Keycloak.

        To fix the problem, follow the steps below:

        1. Log into your Keycloak portal.

2. Check your user membership in the predefined Keycloak groups. Please refer to the KubeRocketCI Access Model page for details on the available Keycloak groups.

        3. Add your user to the groups depending on which permissions you need to grant.

        \ No newline at end of file +
3. Adjust the resources to have a sufficient permission set. Refer to the KubeRocketCI Access Model page for more details about permissions required for proper resource visibility.
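  To verify the effective permissions after adjusting them, a sketch using kubectl impersonation (the codebases resource follows the codebases.v2.edp.epam.com CRD mentioned elsewhere in this documentation; the namespace and account name are examples):

  kubectl auth can-i list codebases -n edp \
    --as system:serviceaccount:edp:<service-account-name>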

      Solution 2 (Keycloak)⚓︎

This solution suits those who log into the KubeRocketCI portal via Keycloak.

      To fix the problem, follow the steps below:

      1. Log into your Keycloak portal.

2. Check your user membership in the predefined Keycloak groups. Please refer to the KubeRocketCI Access Model page for details on the available Keycloak groups.

      3. Add your user to the groups depending on which permissions you need to grant.

      \ No newline at end of file diff --git a/operator-guide/troubleshooting/troubleshoot-applications/index.html b/operator-guide/troubleshooting/troubleshoot-applications/index.html index 420d436a5..fea8aac55 100644 --- a/operator-guide/troubleshooting/troubleshoot-applications/index.html +++ b/operator-guide/troubleshooting/troubleshoot-applications/index.html @@ -1,7 +1,7 @@ - Application Already Exists Error (Gerrit VCS) - EPAM Delivery Platform

      Application Already Exists Error (Gerrit VCS)⚓︎

      Problem⚓︎

      User receives an error message when creating applications using Gerrit as a Git Server provider.

      Cause⚓︎

The Gerrit operator might get stuck during its work.

      Solution⚓︎

      Restarting the Gerrit-related pods can be a solution to the problem:

      1. Check the GerritProject, CodebaseImageStream, CodebaseBranch, and Codebase custom resources related to the previously created application:

        kubectl get CodebaseBranch -n edp
        + Application Already Exists Error (Gerrit VCS) - EPAM Delivery Platform      

        Application Already Exists Error (Gerrit VCS)⚓︎

        Problem⚓︎

        User receives an error message when creating applications using Gerrit as a Git Server provider.

        Cause⚓︎

The Gerrit operator might get stuck during its work.

        Solution⚓︎

        Restarting the Gerrit-related pods can be a solution to the problem:

        1. Check the GerritProject, CodebaseImageStream, CodebaseBranch, and Codebase custom resources related to the previously created application:

          kubectl get CodebaseBranch -n edp
           kubectl get CodebaseImageStream -n edp
           kubectl get Codebase -n edp
           kubectl get GerritProject -n edp
           
        2. Delete the custom resources that relate to the problem application using the kubectl delete command.

        3. Check the pods in your project namespace:

          kubectl get pods -n edp
           
        4. Delete the gerrit-operator and gerrit pods so the replica set will be able to spin up new pods:

kubectl delete pod <gerrit-operator-pod-name> <gerrit-pod-name> -n edp
          -
5. Create the application again. Now the application is supposed to be created successfully.

        \ No newline at end of file +
2. Create the application again. Now the application is supposed to be created successfully.

      \ No newline at end of file diff --git a/operator-guide/troubleshooting/troubleshoot-container-registries/index.html b/operator-guide/troubleshooting/troubleshoot-container-registries/index.html index e27f9ae81..c577f3d4a 100644 --- a/operator-guide/troubleshooting/troubleshoot-container-registries/index.html +++ b/operator-guide/troubleshooting/troubleshoot-container-registries/index.html @@ -1,7 +1,7 @@ - Container Registry Reset - EPAM Delivery Platform

      Container Registry Reset⚓︎

      Problem⚓︎

Resetting the container registry is not feasible because the RESET REGISTRY button is not accessible.

      Cause⚓︎

The KubeRocketCI Portal does not allow reconfiguring the registry because the registry secrets have external owners.

      Solution⚓︎

      Remove the kaniko-docker-config and regcred resources from both the ExternalSecret custom resources and the Kubernetes secrets.

      1. Check the kaniko-docker-config and regcred ExternalSecret custom resources (CRs) in the namespace:

        kubectl get externalsecret kaniko-docker-config -n edp-delivery-os-dev
        + Container Registry Reset - EPAM Delivery Platform      

        Container Registry Reset⚓︎

        Problem⚓︎

Resetting the container registry is not feasible because the RESET REGISTRY button is not accessible.

        Cause⚓︎

The KubeRocketCI Portal does not allow reconfiguring the registry because the registry secrets have external owners.

        Solution⚓︎

        Remove the kaniko-docker-config and regcred resources from both the ExternalSecret custom resources and the Kubernetes secrets.

        1. Check the kaniko-docker-config and regcred ExternalSecret custom resources (CRs) in the namespace:

          kubectl get externalsecret kaniko-docker-config -n edp-delivery-os-dev
           kubectl get externalsecret regcred -n edp-delivery-os-dev
           
        2. Delete the kaniko-docker-config and regcred ExternalSecret CRs using the commands provided below:

          kubectl delete externalsecret kaniko-docker-config -n edp-delivery-os-dev
           kubectl delete externalsecret regcred -n edp-delivery-os-dev
           
        3. Delete the kaniko-docker-config and regcred Kubernetes secrets if they have not been automatically deleted by the External Secrets Operator:

          kubectl delete secret kaniko-docker-config -n edp-delivery-os-dev
           kubectl delete secret regcred -n edp-delivery-os-dev
          -
        4. Disable the creation of the kaniko-docker-config and regcred ExternalSecret CRs in the values file of the edp-install Helm chart.

          Note

By default, it takes 5-10 minutes for the change to take effect, but it may vary depending on your platform configuration.

        5. Refresh the KubeRocketCI portal page and set your container registry again.

        \ No newline at end of file +
      2. Disable the creation of the kaniko-docker-config and regcred ExternalSecret CRs in the values file of the edp-install Helm chart.

        Note

By default, it takes 5-10 minutes for the change to take effect, but it may vary depending on your platform configuration.

      3. Refresh the KubeRocketCI portal page and set your container registry again.

      \ No newline at end of file diff --git a/operator-guide/troubleshooting/troubleshoot-git-server/index.html b/operator-guide/troubleshooting/troubleshoot-git-server/index.html index a3129f470..da5641a7b 100644 --- a/operator-guide/troubleshooting/troubleshoot-git-server/index.html +++ b/operator-guide/troubleshooting/troubleshoot-git-server/index.html @@ -1 +1 @@ - Codebase Creation Issue - EPAM Delivery Platform

      Codebase Creation Issue⚓︎

      This troubleshooting page covers issues related to codebases.

      Problem⚓︎

The application can't be added in the KubeRocketCI portal because of insufficient rights:

      Insufficient permissions error
      Insufficient permissions error

      Cause⚓︎

      It is likely that the token that is created and used for Git Server integration lacks specific rights.

      Solution⚓︎

      To fix the problem, try the following method:

1. Adjust token rights or create a new token with the required set of permissions. Please refer to the Integrate GitHub/GitLab in Tekton page for details.

2. Ensure that the Git Server properties are set correctly. The process of adding a Git Server is described in the Manage Git Servers page.

3. Retry creating the application.

        Codebase created
        Codebase created

      \ No newline at end of file + Codebase Creation Issue - EPAM Delivery Platform

      Codebase Creation Issue⚓︎

      This troubleshooting page covers issues related to codebases.

      Problem⚓︎

The application can't be added in the KubeRocketCI portal because of insufficient rights:

      Insufficient permissions error
      Insufficient permissions error

      Cause⚓︎

      It is likely that the token that is created and used for Git Server integration lacks specific rights.

      Solution⚓︎

      To fix the problem, try the following method:

1. Adjust token rights or create a new token with the required set of permissions. Please refer to the Integrate GitHub/GitLab in Tekton page for details (a quick token check is sketched after this list).

2. Ensure that the Git Server properties are set correctly. The process of adding a Git Server is described in the Manage Git Servers page.

3. Retry creating the application.

        Codebase created
        Codebase created
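  As a quick sanity check for step 1, the token can presumably be probed against the provider's REST API before retrying (the host and token placeholders are examples):

  # GitHub: the x-oauth-scopes response header lists the token's scopes
  curl -sS -I -H "Authorization: token <github-token>" https://api.github.com/user

  # GitLab: a valid token returns the authenticated user
  curl -sS -H "PRIVATE-TOKEN: <gitlab-token>" https://gitlab.com/api/v4/user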

      \ No newline at end of file diff --git a/operator-guide/troubleshooting/troubleshoot-stages/index.html b/operator-guide/troubleshooting/troubleshoot-stages/index.html index 2eacfbe74..aaea16e0d 100644 --- a/operator-guide/troubleshooting/troubleshoot-stages/index.html +++ b/operator-guide/troubleshooting/troubleshoot-stages/index.html @@ -1,5 +1,5 @@ - Application Is Not Deployed - EPAM Delivery Platform

      Application Is Not Deployed⚓︎

      Problem⚓︎

      In the Stage Detail page, the application status is unknown. The deploy button doesn't work when attempting to create a stage.

      Stage is not synced
      Stage is not synced

      Cause⚓︎

The AppProject custom resource may not be created, or it may be created incorrectly.

      Solution⚓︎

      1. Check logs of the application set controller pod using the kubectl logs command:

        kubectl get pods -n argocd
        + Application Is Not Deployed - EPAM Delivery Platform      

        Application Is Not Deployed⚓︎

        Problem⚓︎

        In the Stage Detail page, the application status is unknown. The deploy button doesn't work when attempting to create a stage.

        Stage is not synced
        Stage is not synced

        Cause⚓︎

The AppProject custom resource may not be created, or it may be created incorrectly.

        Solution⚓︎

        1. Check logs of the application set controller pod using the kubectl logs command:

          kubectl get pods -n argocd
           kubectl logs <argo-cd-argocd-applicationset-controller-pod-name> -n argocd
           

          If the problem is related to the AppProject Argo CD resource, then you are supposed to see the following error message format:

          error generating application from params: failed to execute go template {{ .cluster }}: template: :1:3: executing "" at <.cluster>: map has no entry for key "cluster"
           
        2. Check the AppProject Resource:

          kubectl get appprojects -n argocd
          -

Most likely, there are either no resources, or they are deployed incorrectly.

        3. Create the AppProject custom resource. Please refer to the 4th step of the Argo CD Integration page.

        4. Apply the resource using the kubectl apply command and wait until the application is synced with Argo CD.

        5. Check the status of the application:

          Stage is now synced
          Stage is now synced

        \ No newline at end of file +

Most likely, there are either no resources, or they are deployed incorrectly.

      2. Create the AppProject custom resource. Please refer to the 4th step of the Argo CD Integration page.

3. Apply the resource using the kubectl apply command and wait until the application is synced with Argo CD (see the sketch after this list).

      4. Check the status of the application:

        Stage is now synced
        Stage is now synced
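  A minimal sketch of step 3, assuming the AppProject manifest from the Argo CD Integration page was saved locally as appproject.yaml:

  kubectl apply -f appproject.yaml -n argocd
  kubectl get appprojects -n argocd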

      \ No newline at end of file diff --git a/operator-guide/upgrade-edp-3.0/index.html b/operator-guide/upgrade-edp-3.0/index.html index 79432cbab..622422637 100644 --- a/operator-guide/upgrade-edp-3.0/index.html +++ b/operator-guide/upgrade-edp-3.0/index.html @@ -1,4 +1,4 @@ - Upgrade EDP v2.12 to 3.0 - EPAM Delivery Platform

      Upgrade EDP v2.12 to 3.0⚓︎

      Important

      • Before starting the upgrade procedure, please make the necessary backups.
      • Kiosk integration is disabled by default. With EDP below v.3.0.x, define the global.kioskEnabled parameter in the values.yaml file. For details, please refer to the Set Up Kiosk page.
• The gerrit-ssh-port parameter is moved from gerrit-operator.gerrit.sshport to global.gerritSSHPort in the values.yaml file.
• In edp-gerrit-operator, the gitServer.user value in the values.yaml file is changed from jenkins to edp-ci.

      This section provides the details on upgrading EDP to 3.0. Explore the actions and requirements below.

      1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

        kubectl apply -f https://raw.githubusercontent.com/epam/edp-gerrit-operator/d9a4d15244c527ef6d1d029af27574282a281b98/deploy-templates/crds/v2.edp.epam.com_gerrits.yaml
        + Upgrade EDP v2.12 to 3.0 - EPAM Delivery Platform      

        Upgrade EDP v2.12 to 3.0⚓︎

        Important

        • Before starting the upgrade procedure, please make the necessary backups.
        • Kiosk integration is disabled by default. With EDP below v.3.0.x, define the global.kioskEnabled parameter in the values.yaml file. For details, please refer to the Set Up Kiosk page.
• The gerrit-ssh-port parameter is moved from gerrit-operator.gerrit.sshport to global.gerritSSHPort in the values.yaml file.
• In edp-gerrit-operator, the gitServer.user value in the values.yaml file is changed from jenkins to edp-ci.
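For instance, a minimal values.yaml sketch reflecting the moved parameter (the port value is only an example):

global:
  gerritSSHPort: "22"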

        This section provides the details on upgrading EDP to 3.0. Explore the actions and requirements below.

        1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

          kubectl apply -f https://raw.githubusercontent.com/epam/edp-gerrit-operator/d9a4d15244c527ef6d1d029af27574282a281b98/deploy-templates/crds/v2.edp.epam.com_gerrits.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/release/2.14/deploy-templates/crds/v2.edp.epam.com_cdstagedeployments.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/release/2.14/deploy-templates/crds/v2.edp.epam.com_codebasebranches.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/release/2.14/deploy-templates/crds/v2.edp.epam.com_codebaseimagestreams.yaml
          @@ -50,4 +50,4 @@
           
          • Remove the edp-jenkins-dotnet-21-agent agent manifest.
          • Restart the Jenkins pod.
        2. Attach the id_rsa.pub SSH public key from the gerrit-ciuser-sshkey secret to the edp-ci Gerrit user in the gerrit pod:

          ssh -p <gerrit_ssh_port> <host> gerrit set-account --add-ssh-key ~/id_rsa.pub
           

          Notes

          • For this operation, use the gerrit-admin SSH key from secrets.
          • <host> is admin@localhost or any other user with permissions.
        3. Change the username from jenkins to edp-ci in the gerrit-ciuser-sshkey secret:

          kubectl -n <edp-namespace> patch secret gerrit-ciuser-sshkey\
            --patch="{\"data\": { \"username\": \"$(echo -n edp-ci |base64 -w0)\" }}" -oyaml
          -

        Warning

In EDP v.3.0.x, Admin Console is deprecated, and the EDP interface is available only via the EDP Portal.

        \ No newline at end of file +

      Warning

In EDP v.3.0.x, Admin Console is deprecated, and the EDP interface is available only via the EDP Portal.

      \ No newline at end of file diff --git a/operator-guide/upgrade-edp-3.1/index.html b/operator-guide/upgrade-edp-3.1/index.html index 62333e2a5..f7116c489 100644 --- a/operator-guide/upgrade-edp-3.1/index.html +++ b/operator-guide/upgrade-edp-3.1/index.html @@ -1,4 +1,4 @@ - v3.0 to 3.1 - EPAM Delivery Platform

      Upgrade EDP v3.0 to 3.1⚓︎

      Important

      We suggest making a backup of the EDP environment before starting the upgrade procedure.

      This section provides the details on the EDP upgrade to v3.1. Explore the actions and requirements below.

      1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

        kubectl apply -f https://raw.githubusercontent.com/epam/edp-jenkins-operator/v2.13.2/deploy-templates/crds/v2.edp.epam.com_jenkins.yaml
        + v3.0 to 3.1 - EPAM Delivery Platform      

        Upgrade EDP v3.0 to 3.1⚓︎

        Important

        We suggest making a backup of the EDP environment before starting the upgrade procedure.

        This section provides the details on the EDP upgrade to v3.1. Explore the actions and requirements below.

        1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

          kubectl apply -f https://raw.githubusercontent.com/epam/edp-jenkins-operator/v2.13.2/deploy-templates/crds/v2.edp.epam.com_jenkins.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-gerrit-operator/v2.13.4/deploy-templates/crds/v2.edp.epam.com_gerrits.yaml
           
2. To upgrade EDP to v3.1, run the following command:

          helm upgrade edp epamedp/edp-install -n <edp-namespace> --values values.yaml --version=3.1.0
          -

          Note

          To verify the installation, it is possible to test the deployment before applying it to the cluster with the following command:
          helm upgrade edp epamedp/edp-install -n <edp-namespace> --values values.yaml --version=3.1.0 --dry-run

        \ No newline at end of file +

        Note

        To verify the installation, it is possible to test the deployment before applying it to the cluster with the following command:
        helm upgrade edp epamedp/edp-install -n <edp-namespace> --values values.yaml --version=3.1.0 --dry-run

      \ No newline at end of file diff --git a/operator-guide/upgrade-edp-3.2/index.html b/operator-guide/upgrade-edp-3.2/index.html index 69bbaecda..8a3cd270c 100644 --- a/operator-guide/upgrade-edp-3.2/index.html +++ b/operator-guide/upgrade-edp-3.2/index.html @@ -1,4 +1,4 @@ - v3.1 to 3.2 - EPAM Delivery Platform

      Upgrade EDP v3.1 to 3.2⚓︎

      Important

      We suggest making a backup of the EDP environment before starting the upgrade procedure.

      This section provides the details on the EDP upgrade to v3.2.2. Explore the actions and requirements below.

      1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

        kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/v2.15.0/deploy-templates/crds/v2.edp.epam.com_cdstagedeployments.yaml
        + v3.1 to 3.2 - EPAM Delivery Platform      

        Upgrade EDP v3.1 to 3.2⚓︎

        Important

        We suggest making a backup of the EDP environment before starting the upgrade procedure.

        This section provides the details on the EDP upgrade to v3.2.2. Explore the actions and requirements below.

        1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

          kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/v2.15.0/deploy-templates/crds/v2.edp.epam.com_cdstagedeployments.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/v2.15.0/deploy-templates/crds/v2.edp.epam.com_codebasebranches.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/v2.15.0/deploy-templates/crds/v2.edp.epam.com_codebaseimagestreams.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/v2.15.0/deploy-templates/crds/v2.edp.epam.com_codebases.yaml
          @@ -83,4 +83,4 @@
                 # --  Storageclass for Nexus data volume
                 class: gp2
           
2. To upgrade EDP to v3.2.2, run the following command:

          helm upgrade edp epamedp/edp-install -n <edp-namespace> --values values.yaml --version=3.2.2
          -

          Note

          To verify the installation, it is possible to test the deployment before applying it to the cluster with the following command:
          helm upgrade edp epamedp/edp-install -n <edp-namespace> --values values.yaml --version=3.2.2 --dry-run

        \ No newline at end of file +

        Note

        To verify the installation, it is possible to test the deployment before applying it to the cluster with the following command:
        helm upgrade edp epamedp/edp-install -n <edp-namespace> --values values.yaml --version=3.2.2 --dry-run

      \ No newline at end of file diff --git a/operator-guide/upgrade-edp-3.3/index.html b/operator-guide/upgrade-edp-3.3/index.html index 96f39be40..19d81a143 100644 --- a/operator-guide/upgrade-edp-3.3/index.html +++ b/operator-guide/upgrade-edp-3.3/index.html @@ -1,8 +1,8 @@ - v3.2 to 3.3 - EPAM Delivery Platform

      Upgrade EDP v3.2 to 3.3⚓︎

      Important

      We suggest making a backup of the EDP environment before starting the upgrade procedure.

      Note

      We have disabled cache volumes for go and npm in the EDP 3.3 release.

      This section provides the details on the EDP upgrade to v3.3.0. Explore the actions and requirements below.

      1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

        kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/v2.16.0/deploy-templates/crds/v2.edp.epam.com_codebases.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-jenkins-operator/v2.15.0/deploy-templates/crds/v2.edp.epam.com_jenkins.yaml
           
        2. If you use Gerrit VCS, delete the corresponding resource due to changes in annotations:

          kubectl -n edp delete EDPComponent gerrit
           
          The deployment will create a new EDPComponent called gerrit instead.
        3. To upgrade EDP to v3.3.0, run the following command:

          helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.3.0
           

          Note

          To verify the installation, it is possible to test the deployment before applying it to the cluster with the --dry-run flag:
          helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.3.0 --dry-run

        4. In EDP v3.3.0, a new feature was introduced allowing manual pipeline re-triggering by sending a comment with /recheck. To enable the re-trigger feature for applications that were added before the upgrade, please proceed with the following:

          4.1 For Gerrit VCS, add the following event to the webhooks.config configuration file in the All-Projects repository:

          [remote "commentadded"]
             url = http://el-gerrit-listener:8080
             event = comment-added

          4.2 For GitHub VCS, check the Issue comments permission for each webhook in every application added before the EDP upgrade to 3.3.0.

          4.3 For GitLab VCS, check the Comments permission for each webhook in every application added before the EDP upgrade to 3.3.0.



      Upgrade EDP v3.3 to 3.4⚓︎

      Important

      We suggest making a backup of the EDP environment before starting the upgrade procedure.

      Note

      Pay attention: the following components are deprecated and must be additionally migrated to avoid their deletion: perf-operator, edp-admin-console, edp-admin-console-operator, and edp-jenkins-operator. For migration details, please refer to the Migrate CI Pipelines From Jenkins to Tekton instruction.

      This section provides the details on the EDP upgrade to v3.4.1. Explore the actions and requirements below.

      1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

        kubectl apply -f https://raw.githubusercontent.com/epam/edp-cd-pipeline-operator/v2.15.0/deploy-templates/crds/v2.edp.epam.com_cdpipelines.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-cd-pipeline-operator/v2.15.0/deploy-templates/crds/v2.edp.epam.com_stages.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-keycloak-operator/v1.17.0/deploy-templates/crds/v1.edp.epam.com_clusterkeycloakrealms.yaml
           kubectl apply -f https://raw.githubusercontent.com/epam/edp-keycloak-operator/v1.17.0/deploy-templates/crds/v1.edp.epam.com_clusterkeycloaks.yaml
        2. To upgrade EDP to v3.4.1, run the following command:

          helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.4.1

          Note

          To verify the installation, it is possible to test the deployment before applying it to the cluster with the --dry-run flag:
          helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.4.1 --dry-run



      Upgrade EDP v3.4 to 3.5⚓︎

      Important

      We suggest making a backup of the EDP environment before starting the upgrade procedure.

      This section provides detailed instructions for upgrading EPAM Delivery Platform to version 3.5.3. Follow the steps and requirements outlined below:

      1. Update Custom Resource Definitions (CRDs). Run the following command to apply all necessary CRDs to the cluster:

        kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/v2.19.0/deploy-templates/crds/v2.edp.epam.com_gitservers.yaml
           

          Danger

          Codebase-operator v2.19.0 is not compatible with previous versions. Please familiarize yourself with the breaking change in the Git Server Custom Resource Definition.

        2. Familiarize yourself with the updated file structure of the values.yaml file and adjust your values.yaml file accordingly:

          1. By default, the deployment of subcomponents such as edp-sonar-operator, edp-nexus-operator, edp-gerrit-operator, and keycloak-operator is disabled. Set them back to true if they are needed, or manually deploy external tools such as SonarQube, Nexus, and Gerrit and integrate them with the EPAM Delivery Platform.

          2. The default Git provider has been changed from Gerrit to GitHub:

            Old format:

            global:
               gitProvider: gerrit
               gerritSSHPort: "22"
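
            New format (a minimal sketch; only the gitProvider key is taken from the description above, the rest of the section is assumed unchanged):

            global:
               gitProvider: github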

        The tables below illustrate the difference between the old and new format:

        Old format

        Secret Name Username Password Token Secret URL
        jira-user * *
        nexus-ci.user * *
        sonar-ciuser-token * *
        defectdojo-ciuser-token * *
        ci-dependency-track *

        New format

        Secret Name Username Password Token URL
        ci-jira * *
        ci-nexus * * *
        ci-sonarqube * *
        ci-defectdojo * *
        ci-dependency-track * *
      3. To upgrade EDP to v3.5.3, run the following command:

        helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.5.3

        Note

        To verify the installation, it is possible to test the deployment before applying it to the cluster with the --dry-run flag:
        helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.5.3 --dry-run



      Upgrade EDP v3.5 to 3.6⚓︎

      Important

      We suggest backing up the EDP environment before starting the upgrade procedure.

      This section provides detailed instructions for upgrading the EPAM Delivery Platform to version 3.6.0. Follow the steps and requirements outlined below:

      1. Update Custom Resource Definitions (CRDs). Run the following command to apply all the necessary CRDs to the cluster:

        kubectl apply -f https://raw.githubusercontent.com/epam/edp-codebase-operator/v2.20.0/deploy-templates/crds/v2.edp.epam.com_codebases.yaml
           
        2. Familiarize yourself with the updated structure of the values.yaml file and adjust it accordingly:

          2.1 A new parameter called space has been added to the DockerRegistry section. It is designed to form URLs in CodebaseImageStreams. This parameter is set to the same value as the EPAM Delivery Platform namespace name. Ensure you define the space parameter prior to the update, as shown in the sketch below.

          Warning

          This parameter is a significant change and must be set before the update.

          global:
             dockerRegistry:
               type: "harbor"
        3. To upgrade EDP to v3.6.0, run the following command:

          helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.6.0

          Note

          To verify the installation, it is possible to test the deployment before applying it to the cluster with the --dry-run flag:
          helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.6.0 --dry-run



      Upgrade EDP v3.6 to 3.7⚓︎

      Important

      We suggest backing up the EDP environment before starting the upgrade procedure.

      This section provides detailed instructions for upgrading the EPAM Delivery Platform to version 3.7.5. Follow the steps and requirements outlined below:

      1. To upgrade EDP to v3.7.5, run the following command:

        helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.7.5

        Note

        To verify the installation, it is possible to test the deployment before applying it to the cluster with the --dry-run flag:
        helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.7.5 --dry-run



      Upgrade KubeRocketCI v3.7 to 3.8⚓︎

      Important

      We suggest backing up the KubeRocketCI environment before starting the upgrade procedure.

      This section provides detailed instructions for upgrading the KubeRocketCI to the 3.8.1 version. Follow the steps and requirements outlined below:

      1. Delete the following resources:

        kubectl -n edp delete ingress edp-headlamp
           kubectl -n edp delete ingress edp-tekton-dashboard
           kubectl -n edp delete ingress el-github-listener
           
        2. Delete the following Custom Resource Definitions (CRDs):

          kubectl delete CustomResourceDefinition edpcomponents.v1.edp.epam.com
           

      Open the repository in GitHub, navigate to Settings -> Webhooks, select the existing webhook, and click Edit. Change the URL:

        Old value: https://el-gitlab-listener-edp.<dns_wildcard>
         
         New value: https://el-gitlab-edp.<dns_wildcard>

      Upgrade KubeRocketCI v3.8 to 3.9⚓︎

      Important

      We suggest backing up the KubeRocketCI environment before starting the upgrade procedure.

      This section provides detailed instructions for upgrading the KubeRocketCI to version 3.9.0. Follow the steps and requirements outlined below:

      Warning

      Starting from version v.3.9.x, KubeRocketCI no longer supports Kiosk as a tenancy engine tool. Please migrate to the Capsule engine or disable this option.

      1. (Optional) Migrate from Kiosk tenancy engine.

        1. Take a look at how to install Capsule using edp-cluster add-ons.
        2. Integrate Capsule with the EDP platform.
        3. Update the edp-install values file:
        values.yaml
        ...
           cd-pipeline-operator:
             tenancyEngine: "capsule"
           ...
      2. To upgrade to v3.9.0, run the following command:

          helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.9.0

          Note

          To verify the installation, test the deployment before applying it to the cluster with the --dry-run flag:
          helm upgrade edp epamedp/edp-install -n edp --values values.yaml --version=3.9.0 --dry-run



      Upgrade Keycloak v17.0 to 19.0⚓︎

      Starting from v18.x.x, the Keycloak server has been moved from the Wildfly (JBoss) application server to the Quarkus framework and is called Keycloak.X.

      There are two ways to upgrade Keycloak v17.0.x-legacy to v19.0.x on Kubernetes. First, perform the steps described in the Prerequisites section of this tutorial, and then select a suitable upgrade strategy for your environment:

      Prerequisites ⚓︎

      Before upgrading Keycloak, please perform the steps below:

      1. Create a backup/snapshot of the Keycloak database volume. Locate the AWS volumeID and then create its snapshot on AWS:
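
      A possible way to do this from the command line, assuming the Keycloak database PVC lives in the security namespace and the volume is CSI-provisioned (the PVC name is illustrative; for in-tree volumes the ID is under .spec.awsElasticBlockStore.volumeID instead):

      # Resolve the EBS volumeID behind the Keycloak database PVC:
      kubectl get pv "$(kubectl get pvc data-keycloak-postgresql-0 -n security -o jsonpath='{.spec.volumeName}')" -o jsonpath='{.spec.csi.volumeHandle}'
      # Create the snapshot on AWS using the ID from the previous command:
      aws ec2 create-snapshot --volume-id <volume_id_from_previous_command> --description "Keycloak DB backup before the v19.0 upgrade"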


      Overview⚓︎

      The Version Control Systems (VCS) section is dedicated to delivering comprehensive information on VCS within the EPAM Delivery Platform. This section comprises detailed descriptions of all the deployment strategies, along with valuable recommendations for their optimal usage, and the list of supported VCS, facilitating seamless integration with EDP.

      Supported VCS⚓︎

      EDP can be integrated with the following Version Control Systems:

      • Gerrit (used by default);
      • GitHub;
      • GitLab.

      Note

      So far, EDP doesn't support authorization mechanisms in the upstream GitLab.

      VCS Deployment Strategies⚓︎

      EDP offers the following strategies to work with repositories:

      • Create from template – creates a project from a template according to the application language, build tool, and framework selected while creating the application. This strategy is recommended for projects that start developing their applications from scratch.

      Note

      Under the hood, all the built-in application frameworks and build tools are stored in our public GitHub repository.

      • Import project – enables working with the repository located in the added Git server. This scenario is preferred when users already have an application stored in their own pre-configured repository and intend to continue working with it while also utilizing EDP.

      Note

      In order to use the Import project strategy, make sure to adjust it with the Integrate GitHub/GitLab in Tekton page. The Import project strategy is not applicable for Gerrit. Also, it is impossible to select the Empty project field when using the Import project strategy while creating an application, since it is implied that you already have a ready-to-work application in your own repository, whereas the "Empty project" option creates a repository but doesn't put anything in it.

      • Clone project – clones the indicated repository into EPAM Delivery Platform. In this scenario, the application repository is forked from the original application repository to EDP. Since EDP doesn't support multiple VCS integration for now, this strategy is recommended when the user has several applications located in several repositories.

      IAM Roles for Velero Service Accounts⚓︎

      Note

      Make sure that IRSA is enabled and amazon-eks-pod-identity-webhook is deployed according to the Associate IAM Roles With Service Accounts documentation.

      The Velero AWS plugin requires access to AWS resources. Follow the steps below to create the required role:

      1. Create AWS IAM Policy "AWSIRSA‹CLUSTER_NAME›‹VELERO_NAMESPACE›Velero_policy":

        {
               "Version": "2012-10-17",
               "Statement": [
                   {
                   ...
                  }
              ]
         }
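
        The statements elided above grant Velero the EC2 snapshot and S3 permissions described in the upstream Velero AWS plugin documentation; a representative fragment (illustrative, not the exact elided policy):

        {
            "Effect": "Allow",
            "Action": [
                "ec2:DescribeVolumes",
                "ec2:DescribeSnapshots",
                "ec2:CreateSnapshot",
                "ec2:DeleteSnapshot",
                "ec2:CreateTags"
            ],
            "Resource": "*"
        }
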
        2. Attach the "AWSIRSA‹CLUSTER_NAME›‹VELERO_NAMESPACE›Velero_policy" policy to the "AWSIRSA‹CLUSTER_NAME›‹VELERO_NAMESPACE›Velero" role.

        3. Make sure that an Amazon S3 bucket named velero-‹CLUSTER_NAME› exists.
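
          One way to verify (and, if needed, create) the bucket, assuming the AWS CLI is configured:

          aws s3api head-bucket --bucket velero-‹CLUSTER_NAME›   # exits non-zero if the bucket is missing
          aws s3 mb s3://velero-‹CLUSTER_NAME›                   # create it if absent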

        4. Provide the key value eks.amazonaws.com/role-arn: "arn:aws:iam:::role/AWSIRSA‹CLUSTER_NAME›‹VELERO_NAMESPACE›Velero" in the serviceAccount.server.annotations parameter in values.yaml during the Velero installation, as shown in the sketch below.
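
          A minimal values.yaml sketch for this annotation (the ‹AWS_ACCOUNT_ID› placeholder is ours; the line above omits the account ID):

          serviceAccount:
            server:
              annotations:
                eks.amazonaws.com/role-arn: "arn:aws:iam::‹AWS_ACCOUNT_ID›:role/AWSIRSA‹CLUSTER_NAME›‹VELERO_NAMESPACE›Velero"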


      Configure AWS WAF With Terraform⚓︎

      This page describes how to configure AWS WAF using Terraform, with the aim of securing exposed traffic and preventing Host Header vulnerabilities.

      Prerequisites⚓︎

      To follow the instruction, check the following prerequisites:

      1. Deployed infrastructure includes Nginx Ingress Controller
      2. Deployed services for testing
      3. Separate and exposed AWS ALB
      4. terraform 0.14.10
      5. hashicorp/aws = 4.8.0

      Solution Overview⚓︎

      The solution includes two parts:

      1. Prerequisites (mostly the left part of the scheme) - AWS ALB, Compute Resources (EC2, EKS, etc.).
      2. WAF configuration (the right part of the scheme).

      The WAF ACL resource is the main resource used for the configuration; the default web ACL action is Block.

      Overview WAF Solution
      Overview WAF Solution

      The ACL includes three managed AWS rules that secure the exposed traffic:

      • AWS-AWSManagedRulesCommonRuleSet
      • AWS-AWSManagedRulesLinuxRuleSet
      • AWS-AWSManagedRulesKnownBadInputsRuleSet

      AWS provides many rules, such as baseline and use-case-specific rule groups; for details, please refer to the Baseline rule groups.

      The PreventHostInjections rule prevents Host Header vulnerabilities. It includes one statement declaring that the Host header must match the Regex Pattern Set; only in this case is the request passed.

      The Regex Pattern Set is another resource that helps to organize regexes; in fact, it is a set of regexes. All regexes added to a single set are combined by OR, i.e., when exposing several URLs, add a regex for each of them to the set and refer to the set in the rule.

      WAF ACL Configuration⚓︎

      To create the Regex Pattern Set, inspect the following code:

      resource "aws_wafv2_regex_pattern_set" "common" {
         name  = "Common"
         scope = "REGIONAL"
       
         ...
      }
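
      As a hypothetical, self-contained illustration (not the exact configuration elided above), a pattern set for two exposed hosts could look like this; each regular_expression entry is combined with the others by OR:

      resource "aws_wafv2_regex_pattern_set" "example" {
        name  = "Example"
        scope = "REGIONAL"

        # Each regular_expression entry is ORed with the others.
        regular_expression {
          regex_string = "^app\\.example\\.com$"
        }

        regular_expression {
          regex_string = "^api\\.example\\.com$"
        }
      }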

      For the WAF ACL configuration itself, a module is used, but it is also possible to use a Terraform resource.

      We have migrated our documentation to the new domain https://docs.kuberocket.ci. Please use this link to access the latest documentation.

      Overview⚓︎

      EPAM Delivery Platform (EDP) is an open-source cloud-agnostic SaaS/PaaS solution for software development, licensed under Apache License 2.0. It provides a pre-defined set of CI/CD patterns and tools, which allow a user to start product development quickly with established code review, release, versioning, branching, build processes. These processes include static code analysis, security checks, linters, validators, dynamic feature environments provisioning. EDP consolidates the top Open-Source CI/CD tools by running them on Kubernetes/OpenShift, which enables web/app development either in isolated (on-prem) or cloud environments.

      EPAM Delivery Platform, which is also called "The Rocket", shortens the time before active development can start from several months to several hours.

      EDP consists of the following:

      • The platform based on managed infrastructure and container orchestration
      • Security covering authentication, authorization, and SSO for platform services
      • Development and testing toolset
      • Well-established engineering process and EPAM practices (EngX) reflected in CI/CD pipelines, and delivery analytics
      • Local development with debug capabilities

      Features⚓︎

      • Deployed and configured CI/CD toolset (Tekton, ArgoCD, Nexus, SonarQube, DefectDojo)
      • GitHub (by default), GitLab, or Gerrit as a version control system for your code
      • Tekton as a pipeline orchestrator
      • CI pipelines

        Language   | Framework                                     | Build Tool
        Java       | Java 8, Java 11, Java 17                      | Gradle, Maven
        Python     | Python 3.8, FastAPI, Flask                    | Python
        C#         | .Net 3.1, .Net 6.0                            | .Net
        Go         | Beego, Gin, Operator SDK                      | Go
        JavaScript | React, Vue, Angular, Express, Next.js, Antora | NPM
        HCL        | Terraform                                     | Terraform
        Helm       | Helm, Pipeline                                | Helm
        Groovy     | Codenarc                                      | Codenarc
        Rego       | OPA                                           | OPA
        Container  | Docker                                        | Kaniko
      • Portal UI as a single entry point
      • Environments for Microservice Deployment
      • Kubernetes native approach (CRD, CR) to declare CI/CD pipelines

      What's Inside⚓︎

      EPAM Delivery Platform (EDP) is suitable for all aspects of delivery, starting from development and including the capability to deploy the production environment. The EDP architecture is represented in the diagram below.

      Architecture
      Architecture

      EDP consists of four cross-cutting concerns:

      1. Infrastructure as a Service;
      2. GitOps approach;
      3. Container orchestration and centralized services;
      4. Security.

      On the top of these indicated concerns, EDP adds several blocks that include:

      • EDP CI/CD Components. An EDP component enables a CI/CD feature or a tool instance: artifact storage and distribution (Nexus or Artifactory), static code analysis (Sonar), etc.;
      • EDP Artifacts. This element represents an artifact that is being delivered through EDP and presented as code.

        Artifact samples: frontend, backend, mobile, applications, functional and non-functional autotests, workloads for 3rd party components that can be deployed together with applications.

      • EDP development and production environments that share the same logic. Environments wrap a set of artifacts with a specific version and allow performing SDLC routines in order to ensure artifact quality;
      • Pipelines. Pipelines cover the CI/CD process, production rollout, and updates. They also connect the three elements indicated above via automation, allowing SDLC routines to run without human involvement;

      Technology Stack⚓︎

      Explore the EDP technology stack diagram

      Technology stack
      Technology stack

      The EDP IaaS layer supports the most popular public clouds (AWS, Azure, and GCP) while keeping the capability to be deployed on private/hybrid clouds based on OpenStack. EDP containers are based on Docker technology and orchestrated by Kubernetes-compatible solutions.

      There are two main options for Kubernetes provided by EDP:

      • Managed Kubernetes in public clouds, to avoid installing and managing the Kubernetes cluster and to get all the scaling and reliability benefits of this solution;
      • OpenShift, a Platform as a Service on top of Kubernetes from Red Hat. OpenShift is the default option for on-premise installation and can be considered when the solution built on top of EDP must be cloud-agnostic or requires enterprise support;

      There is no limitation to run EDP on vanilla Kubernetes.


      Professional Service Pricing

      Experience the flexibility of deploying the EPAM Delivery Platform on your on-premises infrastructure or in the Cloud. Your valuable data stays securely within your perimeter, ensuring uncompromising security at every step.

      Open Source
      Free
      Enjoy essential features at no cost, perfect for getting started.
      • Community support
      • EDP on any Kubernetes cluster
      • CI pipelines for polyglot microservices
      Scale
      $6,000 /month
      Access advanced features and scalability options to meet the evolving needs of your growing enterprise.
      • 60 professional service hours *
      • 3 months minimum commitment **
      Enterprise
      $13,000 /month
      For mission-critical projects and extensive organizational needs, it delivers top-tier features, priority support, and customizable solutions.
      • 160 professional service hours *
      • 3 months minimum commitment **

      * Professional service hours are versatile, blended hours bundled within your subscription plan. They encompass expertise from diverse specialists, including business analysts, architects, lead automation engineers, DevOps (System Engineers), and performance engineers. These multi-functional hours cater to EDP installation, configuration, integration, customization, feature implementation, updates, and use case implementation. They empower you to leverage a range of expertise for various aspects of optimizing and enhancing your EDP experience.

      ** A minimum time commitment is required for the use of a certain subscription plan.

      Service Category Description Professional Service Hours*
      Deployment, Installation, Configuration
      Deploy the platform in an empty AWS account, including components such as infrastructure, CI/CD, SSO integration, SonarQube, DependencyTrack, Nexus Repository, DefectDojo, logging, monitoring, Docker Registry, and Observability. 60
      Deploy the platform with a minimum component set in the existing Kubernetes cluster, including CI/CD, integration with the existing SonarQube, Repository Manager, and Container Registry. 8
      Traceability and Observability customization: Storage backend configuration, performance tuning. 40
      Monitoring stack customization: Custom Dashboards, Alerts, Notifications. 40
      Logging stack customization: Custom filters and processing for Fluent Bit, log rotation policy tuning. 40
      SonarQube Quality Gate and Quality Profile configuration. 8
      Security (SAST/SCA) report analysis per single codebase. 8
      Framework/Language
      Onboard new Framework or Build tool with CI Pipelines: Implement EDP standard CI Pipeline steps (build, test, lint, push). 40
      Marketplace Template Development: Create a new template, Implement EDP standard CI Pipeline steps (build, test, lint, push). 40
      Custom Tool Integration
      Integrate custom tool with CI Pipeline over API. 30
      Integrate custom tool with EDP Portal UI over Kubernetes API. 40
      Integrate custom tool with EDP Portal UI over API. 80
      Target Deployment Platform Customization
      Add Custom Deployment Platform: Implement custom CD Pipeline. 80

      * The values presented in the table are provided for reference purposes only and are subject to change based on the final scope of the work. Please note that these figures serve as estimates and may be adjusted accordingly as the project details are finalized.



      Create Application⚓︎

      In EDP, all software components, such as applications, libraries, Terraform infrastructures, and automated tests, are termed as codebases. EDP provides flexible methods for scaffolding these components.

      This guide will lead you through creating a Go application using the Gin framework. The EDP Marketplace will be utilized to streamline the application creation process.

      Application Onboarding⚓︎

      To create the first application, complete the instructions below:

      1. In the EDP Portal, navigate to EDP -> Marketplace.

      2. In the Marketplace section, select Web Applications with Gin Framework:

        Marketplace applications
        Marketplace applications

      3. In the window that appears, define the following values and click Apply:

        • Component name: my-go-gin-app
        • Description: My first application
        • Git server: github
        • Repository name: <github_account_name>/my-go-gin-app
        • Codebase versioning type: edp
        • Start version from: 0.1.0
        • Suffix: SNAPSHOT

        Application blank
        Application blank

      4. As soon as the codebase is created, navigate to it via the notification at the bottom left corner:

        Marketplace notification
        Marketplace notification

      Build Application⚓︎

      Having created the Go application, proceed to build it by performing the following actions:

      1. In the component details page, expand the application and click the Go to the Source Code button:

        Marketplace notification
        Application details

      2. In the opened Source Code, create a new branch called test.
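
        If you prefer the command line over the GitHub UI, the branch can be created locally and pushed (the repository name is the one defined during onboarding):

        git clone git@github.com:<github_account_name>/my-go-gin-app.git
        cd my-go-gin-app
        git checkout -b test
        git push -u origin test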

      3. In the SonarCloud organization page, copy the value of the SonarCloud organization name:

        Organization key
        Organization key

      4. In the test branch in GitHub, open the sonar-project.properties file and include the sonar.language=go, sonar.scanner.force-deprecated-java-version=true, and sonar.organization parameters where sonar.organization is equal to the value copied in the previous step, resulting in the following configuration:

        sonar.projectKey=my-go-gin-app
           sonar.projectName=my-go-gin-app
           sonar.go.coverage.reportPaths=coverage.out
           sonar.test.inclusions=**/*_test.go
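           # The parameters below are the ones named in step 4; set sonar.organization
           # to the value copied in step 3 (a placeholder is shown here):
           sonar.language=go
           sonar.scanner.force-deprecated-java-version=true
           sonar.organization=<your_sonarcloud_organization>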

          Note

          This step is necessary due to SonarCloud's discontinuation of support for Java 11, which is utilized in the sonarqube-scanner image. This solution is designed specifically for EDP 3.7.x and lower versions. Users of EDP 3.8.x and higher versions can skip this step.

        5. In the component details page, click the Trigger build pipeline run button:

          Triggering pipeline run
          Triggering pipeline run

        6. Enable port-forwarding for the edp-tekton-dashboard service (in case ingress is not deployed):

          kubectl port-forward service/edp-tekton-dashboard 64372:8080 -n edp
           
          localhost:64372
        7. To observe the build pipeline status, click the tree diagram icon in the Diagram column:

          Tree diagram window
          Tree diagram window

        8. Once the build is failed, click the failed stage name to open the Tekton pipeline run:

          Failure details
          Failure details

          The initial pipeline is expected to fail, primarily due to SonarCloud intricacies. It is imperative to set a Quality Gate in SonarCloud after the initial pipeline run and subsequently re-trigger the build pipeline. After the pipeline failure, a new project is expected to appear in the organization.

        9. In the SonarCloud organization, select the newly appeared project and click the Set New Code Definition button:

          New code definition
          New code definition

        10. In the New Code page, set the Previous version option and click Save:

          New Code page
          New Code page

        11. In the EDP Portal, trigger the build pipeline run one more time and wait until the pipeline run is finished.

        Build pipelines are designed to generate an executable image of an application. Once built, the image can be run in a target environment.

        Now that you have successfully built an application, the next step involves creating an environment for deployment. To deploy the application, it is necessary to install and integrate Argo CD. To do this, navigate to the Integrate Argo CD page.


      Deploy Application⚓︎

      Now, proceed to deploy our first application. This page provides comprehensive instructions on creating an environment and deploying the application within it.

      Create GitOps Repository⚓︎

      As a prerequisite, create a GitOps repository in your GitHub account. EDP Portal adheres to the GitOps approach when working with environments. In a GitOps repository, values are saved to redefine the default behavior (parameters) of deployment for each environment. The creation of a GitOps repository involves the following two steps:

      1. In EDP Portal, navigate to EDP -> Configuration -> Deployment -> GitOps:

        GitOps tab

      2. Define the following values and click Save:

        • Git server: github
        • Git repo relative path: github_account_name
        • Repository Name: edp-gitops

        Add GitOps repository
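
        With the repository in place, EDP stores per-application, per-environment values files in it. For illustration only, an override in the GitOps repository might look like the sketch below; the file path and parameter names are assumptions, not the exact structure EDP generates:

          # my-go-gin-app/values.yaml - hypothetical per-environment override
          replicaCount: 2     # run two replicas in this environment
          ingress:
            enabled: true     # expose the application via ingress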

      Create Environment⚓︎

      To create an environment, follow the steps below:

      1. In EDP Portal, navigate to EDP -> Environments and click the + Create button:

        Environments section

      2. In the Create CD Pipeline window, enter the pipeline name and click the Proceed button:

        Pipelines tab

      3. In the Applications tab, select the go-application and main branch:

        Applications tab

      4. In the Stages tab, click the Add Stage button.

      5. Define the following values and click Apply:

        • Cluster: in-cluster
        • Stage name: dev
        • Namespace: edp-my-go-gin-app-dev
        • Description: Development stage
        • Trigger type: Manual
        • Pipeline template: deploy
        • Quality gate type: Manual
        • Step name: dev

        Create Stage window

      6. In the Stages tab, click the Apply button.

      Application Deployment⚓︎

      To deploy the application, follow the steps below:

      1. In the Environments list, click the Environment name:

        Environments list

      2. In the Environment details page, click the stage name to enter the stage details:

        Environment details

      3. Once you enter the stage details, proceed to deploy the application:

        1. Select an application;
        2. Select the Image stream version;
        3. Click the Deploy button.

        Deploying application
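
        After clicking Deploy, the application pods should appear in the stage namespace defined earlier. A quick check from the command line:

          kubectl get pods -n edp-my-go-gin-app-dev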

      Congratulations! You have completed the Quick Start guide! We hope you enjoyed this journey.

      Now that you have a good understanding of how EDP works, you can further enhance its capabilities by integrating it with Nexus. Additionally, explore other functionalities detailed in our Use Cases section. If you're eager to unlock the full potential of EDP, navigate to the Operator Guide to fine-tune your EDP for optimal performance!


      Integrate Argo CD⚓︎

      EPAM Delivery Platform employs Argo CD as a Continuous Deployment tool for its purposes. This page provides guidance on the installation procedure for Argo CD.

      Installation⚓︎

      To install Argo CD, follow the steps below:

      1. Add a Helm Chart repository:

        helm repo add argocd https://argoproj.github.io/argo-helm
           helm repo update
           
        2. Create the argocd-values.yaml file and paste the following data:

          View: argocd-values.yaml
          redis-ha:
            enabled: false
          ...
              sourceNamespaces:
                - edp

          1. Please enter your GitHub account name here.
        3. Retrieve the Argo CD admin secret and securely save it, as it will be required for future use:

          kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
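
        If the Argo CD CLI happens to be installed, the retrieved password can be sanity-checked by logging in; the host below is a placeholder for your actual Argo CD endpoint:

          argocd login <argocd_host> --username admin --password <retrieved_password>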

        Now that all the necessary preparations are completed, we can proceed with the Deploy Application page.


      Integrate DockerHub⚓︎

      EDP utilizes container registries for storing and distributing application images. This page provides instructions on integrating your platform with DockerHub.

      Integration Procedure⚓︎

      To integrate EPAM Delivery Platform with DockerHub, complete the guidelines below:

      1. Log in to your DockerHub account.

      2. In the DockerHub main page, click your user icon at the top right corner and select Account Settings.

      3. In the Account Settings page, select the Security tab and click New Access Token.

      4. Enter the token description, select the Read, Write, Delete permission and click Generate.

      5. Copy the generated token:

        DockerHub token

      6. In EDP Portal, navigate to EDP -> Configuration -> Registry and click the Add Registry button.

      7. In the Registry Provider field, select DockerHub. Define the following values and click Save:

        • Registry Endpoint: https://docker.io
        • Registry Space: DockerHub account name
        • User: DockerHub account name
        • Password/Token: Your generated access token
        • Use the Push Account's credentials: check

        DockerHub integration

      Note

      In the DockerHub context, the Registry Space field is equivalent to the account/organization name.
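
      Before saving the integration, the token can optionally be verified from any workstation with Docker installed; this is just a sanity check, with the account name and token as placeholders:

        echo "<access_token>" | docker login docker.io -u <dockerhub_account_name> --password-stdin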

      With all integrations in place, let us move forward with creating applications. Please proceed to the Create Application page for further instructions.


      Integrate GitHub⚓︎

      To initiate work in the EPAM Delivery Platform, integration with a Version Control System (VCS) is essential. This integration facilitates the use of create, clone, and import strategies for handling source code. This tutorial focuses on the create strategy, wherein an application is directly created in the integrated VCS solution for subsequent work. Here, you will find instructions on how to integrate EDP with GitHub.

      Note

      Prior to moving forward, ensure you possess an active GitHub account and have successfully deployed EDP.

      Integration Procedure⚓︎

      To integrate EDP with GitHub, follow the steps below:

      1. Generate an SSH key pair:

        ssh-keygen -t ed25519 -C "email@example.com"
      2. Add the created SSH key (the public part) to the GitHub account:

        1. In the GitHub main page, click your user icon. Navigate to Settings -> SSH and GPG keys and click New SSH key.
        2. Create the quick_start key. Insert your ed25519.pub key data and click Add SSH key:

          Repo permission
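
        To confirm that the key was added correctly, test the connection from the machine holding the private key; GitHub should respond with a greeting that contains your username:

          ssh -T git@github.com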

      3. Generate an access token for the GitHub account with read/write access to the API:

        1. Log in to GitHub.
        2. Click the profile account and navigate to Settings -> Developer Settings.
        3. Select Personal access tokens (classic) and generate a new token with the following parameters:

          Repo permission

          Note

          The following (Admin:repo, Admin:org, and User) access is necessary for the GitHub Pull Request Builder plugin to retrieve Pull Request commits, their status, and author information.

          Admin:repo permission
          Admin:org permission
          User permission

        4. Save the new personal access token.

      4. In EDP Portal, navigate to EDP -> Configuration -> Git Servers. Define the following values and click Save:

        • Git provider: select GitHub
        • Host: github.com
        • User: Git
        • SSH port: 22
        • HTTPS port: 443
        • Private SSH key: your generated SSH private key
        • Access token: your account token generated in GitHub

        Git Server configuration
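
      Besides the UI status indicator, the connection state can also be checked from the cluster. Assuming EDP stores this configuration as a GitServer custom resource in the edp namespace (an assumption based on the platform's custom-resource model), a check might look like:

        kubectl get gitservers -n edp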

      Ensure the Git server has a green status. To store container images, integrate EDP with a container registry by navigating to the Integrate DockerHub page.


      Integrate SonarQube⚓︎

      It is mandatory for EDP to have SonarQube integrated with the platform as all the pipelines include the sonar step.

      SonarQube is a robust tool employed in build and code review pipelines to elevate code quality by detecting and reporting issues, along with offering improvement recommendations. SonarCloud, the SaaS edition of SonarQube, serves this purpose.

      This guide will lead you through the configuration process of SonarCloud for your project.

      Note

      An alternative option is to use an independent SonarQube instance.

      Integrate SonarCloud⚓︎

      To integrate SonarCloud with the platform, follow the steps below:

      1. Sign up to SonarCloud with your GitHub account.

      2. Once you are logged in with GitHub, import an organization from GitHub:

        Import organization

        Note

        It is crucial to have the organization created in SonarCloud. If you signed up to SonarCloud using a GitHub account, SonarCloud will suggest creating an organization with a name equivalent to your GitHub account name.

      3. In the Create an organization menu, choose the free plan and click Create organization:

        Choose plan

      4. In your account menu, select the Security tab and generate a token:

        Generate token

      5. In EDP Portal, navigate to EDP -> Configuration -> Code Quality. Define the following values and click Save:

        • URL: https://sonarcloud.io
        • Token: account token generated in SonarCloud

        SonarQube integration
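
      Optionally, the token can be validated against the SonarCloud Web API before saving it; the token is passed as the Basic-auth username with an empty password, and the response should indicate whether the token is valid:

        curl -u <sonarcloud_token>: https://sonarcloud.io/api/authentication/validate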

      After completing the SonarQube integration, proceed to integrate the platform with GitHub. Navigate to the Integrate GitHub page for further instructions.


      Install EDP⚓︎

      This page serves as the starting point for the quick start guide, where we will install Tekton as a prerequisite and then proceed to install the EPAM Delivery Platform itself.

      Install Tekton⚓︎

      EPAM Delivery Platform relies on Tekton resources, including Tasks, Pipelines, Triggers, and Interceptors to execute CI/CD pipelines.

      To install Tekton, run the commands below:

      kubectl apply -f https://storage.googleapis.com/tekton-releases/pipeline/previous/v0.53.4/release.yaml
       kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/previous/v0.25.3/release.yaml
       kubectl apply -f https://storage.googleapis.com/tekton-releases/triggers/previous/v0.25.3/interceptors.yaml
       
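      Before proceeding, it is worth confirming that the Tekton components are up; with the default manifests above, the controllers run in the tekton-pipelines namespace:

        kubectl get pods -n tekton-pipelines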

      Install EDP⚓︎

      To deploy the platform, follow the steps below:

      1. Add a Helm Chart repository:

        helm repo add epamedp https://epam.github.io/edp-helm-charts/stable
        ...

        EDP Portal login menu
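
        The elided part of the step above installs the platform itself. Assuming the stable chart published in the epamedp repository is named edp-install, the installation command follows this general shape (the version is a placeholder):

          helm install edp epamedp/edp-install -n edp --create-namespace --version <edp_version>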

      2. Create the edp-admin service account and generate an access token to open the EDP Portal:

          kubectl -n edp create serviceaccount edp-admin
           kubectl create clusterrolebinding edp-admin --serviceaccount=edp:edp-admin --clusterrole=cluster-admin
           kubectl create token edp-admin -n edp
      3. In the login menu, paste the generated token in the ID token field and click the Authenticate button.

      4. Upon logging in, specify the namespace for EDP Portal where EDP is deployed by clicking the cluster settings link in the bottom left corner of the UI:

        Specify namespaces

      5. In the Cluster Settings page, define the following fields:

        • Default namespace: edp
        • Allowed namespaces: edp

        Cluster Settings menu

      Note

      Remember to click the + icon when adding the allowed namespace.

      After completing these steps, you will get access to EPAM Delivery Platform components through the EDP Portal UI. You can now proceed with the integration steps, starting with the SonarQube integration.


      Overview⚓︎

      This page serves as an introductory part of the Quick Start guide. It outlines the core steps and reasons to perform them.

      The purpose of this guide is to swiftly demonstrate the key functionalities of the EPAM Delivery Platform (EDP). After completing this guide, users will have a clear understanding of EDP’s capabilities. This guide is designed for those who wish to quickly explore EDP.

      Setup Flow⚓︎

      Basically, the installation procedure of EDP consists of 4 logical parts:

      graph LR;
          A(Prerequisites) --> B(Platform Installation) --> C(Integration) --> D(Application Deployment)
      1. Prerequisites - On this step, Tekton and Argo CD are installed to provide EDP with CI/CD mechanisms.
      2. Platform Installation - Here we deploy the EPAM Delivery Platform itself and adjust it for work.
      3. Integration - On this step, third-party tools are integrated with EDP to provide the platform with a minimum functionality set.
      4. Application Deployment - Here we create and deploy an application; this is the target endpoint of the whole guide.

      Formally, the guide is divided into two parts: CI and CD, corresponding to the stages of the pipeline that the platform supports at each moment.

      Prerequisites⚓︎

      Before deploying EDP, make sure a local Kubernetes cluster is deployed. We recommend allocating 4 CPUs and 8 GB of RAM to it.
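
      For example, with minikube the recommended capacity can be requested at start-up (a sketch; any local cluster tool with equivalent sizing works):

        minikube start --cpus 4 --memory 8192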

      Alternatively, use any cloud provider that offers Kubernetes service with sufficient CPU and RAM capacity. For instance, we encourage you to check out our video tutorial, where we demonstrate the installation of EDP via the Civo Marketplace. In this case you will be able to skip the installation part of the guide and proceed with adjusting integrations.

      Starting Point⚓︎

      The very first step of the guide is to prepare the cluster environment for the EDP setup process by installing the Tekton tool. Immediately after this we will be able to install EDP.

      All the guidelines are described in the Platform Installation page. Alternatively, watch our video tutorial that clearly demonstrates this process.

      Good luck!


      RoadMap⚓︎

      RoadMap consists of three streams:

      I. Community⚓︎

      Goals:

      • Innovation Through Collaboration
      • Improve OpenSource Adoption
      • Build Community around technology solutions EDP is built on

      Deliver Operators on OperatorHub⚓︎

      OperatorHub is the de facto leading solution that consolidates the Kubernetes Community around Operators. EDP follows the best practices of delivering Operators in a quick and reliable way. We want to improve the Deployment and Management experience for our Customers by publishing all EDP operators on this HUB.

      Another artifact aggregator used by EDP is ArtifactHub, which holds descriptions for both stable and under-development components.

      OperatorHub. Keycloak Operator

      EDP Keycloak Operator is now available from OperatorHub both for Upstream (Kubernetes) and OpenShift deployments.

      II. Architecture⚓︎

      Goals:

      • Improve reusability for EDP components
      • Integrate Kubernetes Native Deployment solutions
      • Introduce abstraction layer for CI/CD components
      • Build processes around the GitOps approach
      • Introduce secrets management

      Kubernetes Multitenancy⚓︎

      Multiple instances of EDP are run in a single Kubernetes cluster. One way to achieve this is to use Multitenancy. Initially, Kiosk was selected as the tool that provides this capability. An alternative option that the EDP Team took into consideration is Capsule. Another tool which goes far beyond multitenancy is vcluster, making it a good candidate for e2e testing scenarios where one needs a simple, lightweight Kubernetes cluster in CI pipelines.

      EDP Release 3.5.3

      The EPAM Delivery Platform (EDP) has added Capsule as a general tenant management solution for Kubernetes. Capsule is an open-source operator that enables you to create and manage multiple tenants on a shared Kubernetes cluster, while ensuring resource isolation, security, and governance.

      EDP Release 3.5.3

      Vcluster is actively used in EDP for e2e testing purposes.

      Microservice Reference Architecture Framework⚓︎

      EDP provides basic Application Templates for a number of technology stacks (Java, .Net, NPM, Python), and Helm is used as a deployment tool. The goal is to extend this library and provide Application Templates built on pre-defined architecture patterns (e.g., Microservice, API Gateway, Circuit Breaker, CQRS, Event Driven) and Deployment Approaches (Canary, Blue/Green). This requires installing additional tools on the cluster as well.

      Policy Enforcement for Kubernetes⚓︎

      Running workloads in Kubernetes calls for extra effort from Cluster Administrators to ensure those workloads follow best practices or specific requirements defined at the organization level. Those requirements can be formalized in policies and integrated into CI Pipelines and the Kubernetes Cluster (through the Admission Controller approach) to guarantee proper resource management during the development and runtime phases. EDP uses Open Policy Agent (from version 2.8.0), since it supports compliance checks for many use-cases: Kubernetes Workloads, Terraform and Java code, HTTP APIs, and many others. Kyverno is another option being evaluated in the scope of this activity.

      Secrets Management⚓︎

      EDP should provide secrets management as a part of the platform. There are multiple tools providing secrets management capabilities. The aim is to be aligned with the GitOps and Operator Pattern approaches, so HashiCorp Vault, Banzaicloud Bank Vaults, and Bitnami Sealed Secrets are currently used for internal projects, and some of them should be made publicly available as a part of the EDP Deployment.

      EDP Release 2.12.x

      External Secret Operator is a recommended secret management tool for the EDP components.

      Release Management⚓︎

      Conventional Commits and Conventional Changelog are two approaches to be used as part of the release process. Today, EDP provides only capabilities to manage Release Branches. This activity should address this gap by formalizing and implementing the Release Process as a part of EDP. Topics to be covered: Versioning, Tagging, Artifacts Promotion.

      Kubernetes Native CI/CD Pipelines⚓︎

      EDP has deprecated Jenkins in favour of Tekton. Jenkins is no longer available since EDP v3.4.4.

      EDP Release 2.12.x

      Argo CD is suggested as a solution providing the Continuous Delivery capabilities.

      EDP Release 3.0

      Tekton is used as a CI/CD pipelines orchestration tool on the platform. Review the edp-tekton GitHub repository that keeps all the logic behind this solution in EDP (Pipelines, Tasks, TriggerTemplates, Interceptors, etc.). Get acquainted with the series of publications on our Medium Page.

      Advanced EDP Role-based Model⚓︎

      EDP has a number of base roles which are used across EDP. In some cases, it is necessary to provide more granular permissions for specific users. It is possible to do this using the Kubernetes-native approach.

      Notifications Framework⚓︎

      EDP has a number of components which need to report their statuses: Build/Code Review/Deploy Pipelines, changes in Environments, updates with artifacts. The goal for this activity is to onboard a Kubernetes-native approach that provides notification capabilities with different sources/channels integration (e.g. Email, Slack, MS Teams). Some of these tools are Argo Events and Botkube.

      Reconciler Component Retirement⚓︎

      The persistent layer, which is based on edp-db (PostgreSQL), and the reconciler component should be retired in favour of Kubernetes Custom Resources (CR). The latest features in EDP are implemented using the CR approach.

      EDP Release 3.0

      Reconciler component is deprecated and is no longer supported. All the EDP components are migrated to Kubernetes Custom Resources (CR).

      III. Building Blocks⚓︎

      Goals:

      • Introduce best practices from Microservice Reference Architecture deployment and observability using Kubernetes Native Tools
      • Enable integration with the Centralized Test Reporting Frameworks
      • Onboard SAST/DAST tool as a part of CI pipelines and Non-Functional Testing activities

      EDP Release 2.12.x

      SAST is introduced as a mandatory part of the CI Pipelines. See the list of currently supported SAST scanners.

      Infrastructure as Code⚓︎

      EDP's target tool for Infrastructure as Code (IaC) is Terraform. EDP sees two CI/CD scenarios while working with IaC: Module Development and Live Environment Deployment. Today, EDP provides basic capabilities (CI Pipelines) for Terraform Module Development. At the same time, EDP currently doesn't provide Deployment pipelines for Live Environments; this feature is under development. Terragrunt is an option to use in Live Environment deployment. Another Kubernetes-native approach to provision infrastructure components is Crossplane.

      Database Schema Management⚓︎

      One of the challenges for an Application running in Kubernetes is managing the database schema. There are a number of tools which provide such capabilities, e.g. Liquibase and Flyway. Both tools provide version control for database schemas. There are different approaches on how to run migration scripts in Kubernetes: in an init container, as a separate Job, or as a separate CD stage. The purpose of this activity is to provide a database schema management solution in Kubernetes as a part of EDP. The EDP Team investigates the SchemaHero tool and use-cases which suit the Kubernetes-native approach for database schema migrations.
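
      As an illustration of the init container approach mentioned above, a pod spec can run its migrations before the application starts; the image, command, and secret below are hypothetical placeholders for whichever tool (Liquibase, Flyway) is chosen:

        # Hypothetical fragment of a pod spec: run schema migrations first
        initContainers:
          - name: db-migrate
            image: <migration_tool_image>   # e.g. a Flyway or Liquibase image
            command: ["migrate"]            # placeholder migration command
            envFrom:
              - secretRef:
                  name: db-credentials      # assumed Secret with DB connection info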

      Open Policy Agent⚓︎

      Open Policy Agent is introduced in version 2.8.0. EDP now supports CI for the Rego Language, so you can develop your own policies. The next goal is to provide pipeline steps for running compliance policy checks for Terraform, Java, and Helm Charts as a part of the CI process.

      Report Portal⚓︎

      EDP uses the Allure Framework as a Test Report tool. Another option is to integrate Report Portal into the EDP ecosystem.

      EDP Release 3.0

      Use ReportPortal to consolidate and analyze your automation test results. Consult our pages on how to perform reporting and Keycloak integration.

      Carrier⚓︎

      Carrier provides Non-functional testing capabilities.

      Java 17⚓︎

      EDP supports two LTS versions of Java: 8 and 11. The goal is to provide Java 17 (LTS) support.

      EDP Release 3.2.1

      CI Pipelines for Java 17 is available in EDP.

      Velero⚓︎

      Velero is used as a cluster backup tool and is deployed as a part of the Platform. Currently, Multitenancy/On-premise support for backup capabilities is in progress.

      Istio⚓︎

      Istio is to be used as a Service Mesh and to address challenges for Microservice or Distributed Architectures.

      Kong⚓︎

      Kong is one of the tools planned for use as an API Gateway solution provider. Another possible candidate for investigation is Ambassador API Gateway.

      OpenShift 4.X⚓︎

      EDP supports the OpenShift 4.9 platform.

      EDP Release 2.12.x

      EDP Platform runs on the latest OKD versions: 4.9 and 4.10. Creating the IAM Roles for Service Account is a recommended way to work with AWS Resources from the OKD cluster.

      IV. Admin Console (UI)⚓︎

      Goals:

      • Improve UX for different user types to address their concerns in the delivery model
      • Introduce user management capabilities
      • Enrich with traceability metrics for products

      EDP Release 2.12.x

      EDP Team has introduced a new UI component called EDP Headlamp, which will replace the EDP Admin Console in future releases. EDP Headlamp is based on the Kinvolk Headlamp UI Client.

      EDP Release 3.0

      EDP Headlamp is used as a Control Plane UI on the platform.

      EDP Release 3.4

      Since EDP v3.4.0, Headlamp UI has been renamed to EDP Portal.

      Users Management⚓︎

      EDP uses Keycloak as an Identity and Access provider. EDP roles/groups are managed inside the Keycloak realm, then these changes are propagated across the EDP Tools. We plan to provide this functionality in EDP Portal using the Kubernetes-native approach (Custom Resources).

      The Delivery Pipelines Dashboard⚓︎

      The CD Pipeline section in EDP Portal provides basic information, such as environments, artifact versions deployed per environment, and direct links to the namespaces. One option is to enrich this panel with metrics from Prometheus, custom resources, or events. Another option is to use existing dashboards and expose EDP metrics to them, for example, a plugin for Lens or the OpenShift UI Console.

      Split Jira and Commit Validation Sections⚓︎

      The Commit Validate step was designed to be aligned with Jira Integration and cannot be used as a single feature. The target state is to ensure that the CommitMessage Validation and Jira Integration features can both be used independently. We also want to add support for Conventional Commits.

      EDP Release 3.2.0

      EDP Portal has separate sections for Jira Integration and CommitMessage Validation step.

      V. Documentation as Code⚓︎

      Goals:

      • Transparent documentation and clear development guidelines for EDP customization.
      • Components that provide Documentation as Code feature should be integrated into EDP.

      EDP Release 3.4.0

      Antora was introduced as a framework that provides Documentation as Code capabilities.

      Consolidate documentation in the single edp-install repository, use the mkdocs tool to generate docs, and use GitHub Pages as a hosting solution.

      \ No newline at end of file + RoadMap - EPAM Delivery Platform

      RoadMap⚓︎

      RoadMap consists of three streams:

      I. Community⚓︎

      Goals:

      • Innovation Through Collaboration
      • Improve OpenSource Adoption
      • Build Community around technology solutions EDP is built on

      Deliver Operators on OperatorHub⚓︎

      OperatorHub is a defacto leading solution which consolidates Kubernetes Community around Operators. EDP follows the best practices of delivering Operators in a quick and reliable way. We want to improve Deployment and Management experience for our Customers by publishing all EDP operators on this HUB.

      Another artifact aggregator which is used by EDP - ArtifactHub, that holds description for both components: stable and under-development.

      OperatorHub. Keycloak Operator

      EDP Keycloak Operator is now available from OperatorHub both for Upstream (Kubernetes) and OpenShift deployments.

      II. Architecture⚓︎

      Goals:

      • Improve reusability for EDP components
      • Integrate Kubernetes Native Deployment solutions
      • Introduce abstraction layer for CI/CD components
      • Build processes around the GitOps approach
      • Introduce secrets management

      Kubernetes Multitenancy⚓︎

      Multiple instances of EDP are run in a single Kubernetes cluster. One way to achieve this is to use Multitenancy. Initially, Kiosk was selected as tools that provides this capability. An alternative option that EDP Team took into consideration is Capsule. Another tool which goes far beyond multitenancy is vcluster going a good candidate for e2e testing scenarios where one needs simple lightweight kubernetes cluster in CI pipelines.

      EDP Release 3.5.3

      The EPAM Delivery Platform (EDP) has added Capsule as a general tenant management solution for Kubernetes. Capsule is an open-source operator that enables you to create and manage multiple tenants on a shared Kubernetes cluster, while ensuring resource isolation, security, and governance.

      EDP Release 3.5.3

      Vcluster is actively used in EDP for e2e testing purposes.

      Microservice Reference Architecture Framework⚓︎

      EDP provides basic Application Templates for a number of technology stacks (Java, .Net, NPM, Python) and Helm is used as a deployment tool. The goal is to extend this library and provide: Application Templates which are built on pre-defined architecture patterns (e.g., Microservice, API Gateway, Circuit Breaker, CQRS, Event Driven) and Deployment Approaches: Canary, Blue/Green. This requires additional tools installation on cluster as well.

      Policy Enforcement for Kubernetes⚓︎

      Running workload in Kubernetes calls for extra effort from Cluster Administrators to ensure those workloads do follow best practices or specific requirements defined on organization level. Those requirements can be formalized in policies and integrated into: CI Pipelines and Kubernetes Cluster (through Admission Controller approach) - to guarantee proper resource management during development and runtime phases. EDP uses Open Policy Agent (from version 2.8.0), since it supports compliance check for more use-cases: Kubernetes Workloads, Terraform and Java code, HTTP APIs and many others. Kyverno is another option being checked in scope of this activity.

      Secrets Management⚓︎

      EDP should provide secrets management as a part of platform. There are multiple tools providing secrets management capabilities. The aim is to be aligned with GitOps and Operator Pattern approaches so HashiCorp Vault, Banzaicloud Bank Vaults, Bitnami Sealed Secrets are currently used for internal projects and some of them should be made publicly available - as a part of EDP Deployment.

      EDP Release 2.12.x

      External Secret Operator is a recommended secret management tool for the EDP components.

      Release Management⚓︎

      Conventional Commits and Conventional Changelog are two approaches to be used as part of release process. Today EDP provides only capabilities to manage Release Branches. This activity should address this gap by formalizing and implementing Release Process as a part of EDP. Topics to be covered: Versioning, Tagging, Artifacts Promotion.

      Kubernetes Native CI/CD Pipelines⚓︎

      EDP has deprecated Jenkins in favour of Tekton. Jenkins is no longer available since EDP v3.4.4.

      EDP Release 2.12.x

      Argo CD is suggested as a solution providing the Continuous Delivery capabilities.

      EDP Release 3.0

      Tekton is used as a CI/CD pipelines orchestration tool on the platform. Review edp-tekton GitHub repository that keeps all the logic behind this solution on the EDP (Pipelines, Tasks, TriggerTemplates, Interceptors, etc). Get acquainted with the series of publications on our Medium Page.

      Advanced EDP Role-based Model⚓︎

      EDP has a number of base roles which are used across EDP. In some cases it is necessary to provide more granular permissions for specific users. It is possible to do this using Kubernetes Native approach.

      Notifications Framework⚓︎

      EDP has a number of components which need to report their statuses: Build/Code Review/Deploy Pipelines, changes in Environments, updates with artifacts. The goal for this activity is to onboard Kubernetes Native approach which provides Notification capabilities with different sources/channels integration (e.g. Email, Slack, MS Teams). Some of these tools are Argo Events, Botkube.

      Reconciler Component Retirement⚓︎

      Persistent layer, which is based on edp-db (PostgreSQL) and reconciler component should be retired in favour of Kubernetes Custom Resource (CR). The latest features in EDP are implemented using CR approach.

      EDP Release 3.0

      Reconciler component is deprecated and is no longer supported. All the EDP components are migrated to Kubernetes Custom Resources (CR).

      III. Building Blocks⚓︎

      Goals:

      • Introduce best practices from Microservice Reference Architecture deployment and observability using Kubernetes Native Tools
      • Enable integration with the Centralized Test Reporting Frameworks
      • Onboard SAST/DAST tool as a part of CI pipelines and Non-Functional Testing activities

      EDP Release 2.12.x

      SAST is introduced as a mandatory part of the CI Pipelines. The list of currently supported SAST scanners.

      Infrastructure as Code⚓︎

      EDP Target tool for Infrastructure as Code (IaC) is Terraform. EDP sees two CI/CD scenarios while working with IaC: Module Development and Live Environment Deployment. Today, EDP provides basic capabilities (CI Pipelines) for Terraform Module Development. At the same time, currently EDP doesn't provide Deployment pipelines for Live Environments and the feature is under development. Terragrunt is an option to use in Live Environment deployment. Another Kubernetes Native approach to provision infrastructure components is Crossplane.

      Database Schema Management⚓︎

      One of the challenges for Application running in Kubernetes is to manage database schema. There are a number of tools which provides such capabilities, e.g. Liquibase, Flyway. Both tools provide versioning control for database schemas. There are different approaches on how to run migration scripts in Kubernetes: in init container, as separate Job or as a separate CD stage. Purpose of this activity is to provide database schema management solution in Kubernetes as a part of EDP. EDP Team investigates SchemaHero tool and use-cases which suits Kubernetes native approach for database schema migrations.

      Open Policy Agent⚓︎

      Open Policy Agent is introduced in version 2.8.0. EDP now supports CI for Rego Language, so you can develop your own policies. The next goal is to provide pipeline steps for running compliance policies check for Terraform, Java, Helm Chart as a part of CI process.

      Report Portal⚓︎

      EDP uses Allure Framework as a Test Report tool. Another option is to integrate Report Portal into EDP ecosystem.

      EDP Release 3.0

      Use ReportPortal to consolidate and analyze your Automation tests results. Consult our pages on how to perform reporting and Keycloak integration.

      Carrier⚓︎

      Carrier provides Non-functional testing capabilities.

      Java 17⚓︎

      EDP supports two LTS versions of Java: 8 and 11. The goal is to provide Java 17 (LTS) support.

      EDP Release 3.2.1

      CI Pipelines for Java 17 is available in EDP.

      Velero⚓︎

      Velero is used as a cluster backup tool and is deployed as a part of Platform. Currently, Multitenancy/On-premise support for backup capabilities is in process.

      Istio⚓︎

      Istio is to be used as a Service Mesh and to address challenges for Microservice or Distributed Architectures.

      Kong⚓︎

      Kong is one of tools which is planned to use as an API Gateway solution provider. Another possible candidate for investigation is Ambassador API Gateway

      OpenShift 4.X⚓︎

      EDP supports the OpenShift 4.9 platform.

      EDP Release 2.12.x

      EDP Platform runs on the latest OKD versions: 4.9 and 4.10. Creating the IAM Roles for Service Account is a recommended way to work with AWS Resources from the OKD cluster.

      IV. Admin Console (UI)⚓︎

      Goals:

      • Improve UÐ¥ for different user types to address their concerns in the delivery model
      • Introduce user management capabilities
      • Enrich with traceability metrics for products

      EDP Release 2.12.x

      EDP Team has introduced a new UI component called EDP Headlamp, which will replace the EDP Admin Console in future releases. EDP Headlamp is based on the Kinvolk Headlamp UI Client.

      EDP Release 3.0

      EDP Headlamp is used as a Control Plane UI on the platform.

      EDP Release 3.4

      Since EDP v3.4.0, Headlamp UI has been renamed to EDP Portal.

      Users Management⚓︎

      EDP uses Keycloak as an Identity and Access provider. EDP roles/groups are managed inside the Keycloak realm, then these changes are propagated across the EDP Tools. We plan to provide this functionality in EDP Portal using the Kubernetes-native approach (Custom Resources).

      The Delivery Pipelines Dashboard⚓︎

      The CD Pipeline section in EDP Portal provides basic information, such as environments, artifact versions deployed per each environment, and direct links to the namespaces. One option is to enrich this panel with metrics from the Prometheus, custom resources, or events. Another option is to use the existing dashboards and expose EDP metrics to them, for example, plugin for Lens or OpenShift UI Console.

      Split Jira and Commit Validation Sections⚓︎

      Commit Validate step was designed to be aligned with Jira Integration and cannot be used as single feature. Target state is to ensure features CommitMessage Validation and Jira Integration both can be used independently. We also want to add support for Conventional Commits.

      EDP Release 3.2.0

      EDP Portal has separate sections for Jira Integration and CommitMessage Validation step.

      V. Documentation as Code⚓︎

      Goals:

      • Transparent documentation and clear development guidelines for EDP customization.
      • Components that provide Documentation as Code feature should be integrated into EDP.

      EDP Release 3.4.0

      Antora was introduced as framework that provides Documentation as Code capabilities.

      Consolidate documentation in a single repository edp-install, use mkdocs tool to generate docs and GitHub Pages as a hosting solution.

\ No newline at end of file
diff --git a/sitemap.xml b/sitemap.xml
index b2616e18a..e1a406293 100644
--- a/sitemap.xml
+++ b/sitemap.xml
@@ -2,802 +2,802 @@
[Hunk omitted: every <lastmod> entry in the sitemap is bumped from 2024-07-04 to 2024-07-18; the URL list itself is unchanged.]

diff --git a/sitemap.xml.gz b/sitemap.xml.gz
index b492362b819895698963cf3c1f65bd0132e74a60..c580a6ef2be415366bdca632380d05d1b829205f 100644
GIT binary patch
[Binary delta omitted.]

diff --git a/supported-versions/index.html b/supported-versions/index.html
--- a/supported-versions/index.html
+++ b/supported-versions/index.html
@@ -1,4 +1,4 @@
- Supported Versions and Compatibility - EPAM Delivery Platform

      + Supported Versions and Compatibility - EPAM Delivery Platform      

      Supported Versions and Compatibility⚓︎

      At the EPAM Delivery Platform, our commitment to providing a robust and reliable service involves supporting the latest innovations while ensuring stability and security. To maintain this balance, we focus our active support on our platform's three most recent versions. This approach allows us to dedicate our resources to delivering cutting-edge features, enhancing performance, and fortifying security measures while ensuring a consistent and optimized user experience across the platform.

      gantt
           dateFormat  YYYY-MM-DD
           title Release Schedule and Lifecycle
       
      @@ -25,4 +25,4 @@
       
           section (Unsupported)
           EDP 3.3 Release: r33, 2023-05-13, 2023-11-03
-
\ No newline at end of file
+

      By concentrating our support on the last three versions, we prioritize staying current with evolving technology landscapes. This enables us to swiftly adapt to advancements in Kubernetes and OpenShift environments, incorporating the latest enhancements, fixes, and optimizations into our platform.

      This support model not only fosters innovation but also enables us to deliver a secure, high-performing, and dependable EPAM Delivery Platform that meets the dynamic needs of our users and exceeds expectations for reliability and functionality.

      Get acquainted with the list of the latest releases and component versions on which the platform is tested and verified:

      Professional Services

      You can leverage our professional services to upgrade your platform to the latest version or perform migration from Unsupported to Supported versions.

      EDP Release Version Release Date EKS Version OpenShift Version
      3.9 Jun 13, 2024 1.29 4.12
      3.8 Mar 12, 2024 1.26 4.12
      3.7 Dec 15, 2023 1.26 4.12
      3.6 Nov 03, 2023 1.26 4.12
      3.5 Sep 21, 2023 1.26 4.12
      3.4 Aug 18, 2023 1.26 4.12
      3.3 May 25, 2023 1.26 4.12
      3.2 Mar 26, 2023 1.23 4.10
      3.1 Jan 24, 2023 1.23 4.10
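
The Gantt chart source shown earlier on this page (truncated by the diff hunk) is written in Mermaid Gantt syntax. A minimal self-contained snippet in the same syntax, using dates from the release table above (the open-ended duration for the supported release is illustrative), would look like this:

    gantt
        dateFormat  YYYY-MM-DD
        title Release Schedule and Lifecycle
        section Supported
        EDP 3.9 Release: r39, 2024-06-13, 180d
        section (Unsupported)
        EDP 3.3 Release: r33, 2023-05-13, 2023-11-03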
\ No newline at end of file
diff --git a/use-cases/application-scaffolding/index.html b/use-cases/application-scaffolding/index.html
index 8928a2492..df3882703 100644
--- a/use-cases/application-scaffolding/index.html
+++ b/use-cases/application-scaffolding/index.html
@@ -1,4 +1,4 @@
- Scaffold and Deploy FastAPI Application - EPAM Delivery Platform

        + Scaffold and Deploy FastAPI Application - EPAM Delivery Platform      

        Scaffold and Deploy FastAPI Application⚓︎

        Overview⚓︎

        This use case describes the creation and deployment of a FastAPI application to enable a developer to quickly generate a functional code structure for a FastAPI web application (with basic read functionality), customize it to meet specific requirements, and deploy it to a development environment. By using a scaffolding tool and a standardized process for code review, testing and deployment, developers can reduce the time and effort required to build and deploy a new application while improving the quality and reliability of the resulting code. Ultimately, the goal is to enable the development team to release new features and applications more quickly and efficiently while maintaining high code quality and reliability.
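
To make the scaffolding step concrete: the generated project centers on a single FastAPI module with basic read functionality. A minimal sketch of such an application is shown below (illustrative only; the actual EDP template may differ in layout). Once deployed, it answers exactly like the curl check at the end of this use case:

    # main.py - a minimal FastAPI application with one read endpoint
    from fastapi import FastAPI

    app = FastAPI()

    @app.get("/")
    def read_root():
        # Returns the same payload verified later with curl
        return {"Hello": "World"}

You can serve it locally with uvicorn main:app --reload to verify the endpoint before pushing changes.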

        Roles⚓︎

        This documentation is tailored for the Developers and Team Leads.

        Goals⚓︎

        • Create a new FastAPI application quickly.
        • Deploy the initial code to the DEV environment.
        • Check CI pipelines.
        • Perform code review.
        • Delivery update by deploying the new version.

        Preconditions⚓︎

        • EDP instance is configured with Gerrit, Tekton and Argo CD.
        • Developer has access to the EDP instances using the Single-Sign-On approach.
        • Developer has the Administrator role (to perform merge in Gerrit).

        Scenario⚓︎

        To scaffold and deploy FastAPI Application, follow the steps below.

        Scaffold the New FastAPI Application⚓︎

        1. Open EDP Portal URL. Use the Sign-In option.

          Logging Page
          Logging screen

        2. Ensure Namespace value in the User Settings tab points to the namespace with the EDP installation.

          Settings
          Settings button

        3. Create the new Codebase with the Application type using the Create strategy. To do this, open EDP tab.

          Cluster Overview
          Cluster overview

        4. Select the Components Section under the EDP tab and push the create + button.

          Components Overview
          Components tab

        5. Select the Application Codebase type because we are going to deliver our application as a container and deploy it inside the Kubernetes cluster. Choose the Create strategy to scaffold our application from the template provided by the EDP and press the Proceed button.

          Codebase Info
          Step codebase info

        6. On the Application Info tab, define the following values and press the Proceed button:

          • Application name: fastapi-demo
          • Default branch: main
          • Application code language: Python
          • Language version/framework: FastAPI
          • Build tool: Python

          Application Info
          Application info

7. On the Advanced Settings tab, define the values below and push the Apply button:

          • CI tool: Tekton
          • Codebase versioning type: edp
          • Start version from: 0.0.1 and SNAPSHOT

          Advanced Settings
          Advanced settings

        8. Check the application status. It should be green:

          Components overview page
          Application status

        Deploy the Application to the Development Environment⚓︎

        This section describes the application deployment approach from the latest branch commit. The general steps are:

        • Build the initial version (generated from the template) of the application from the last commit of the main branch.
        • Create a CD Pipeline to establish continuous delivery to the development environment.
        • Deploy the initial version to the development env.

        To succeed with the steps above, follow the instructions below:

        1. Build Container from the latest branch commit. To build the initial version of the application's main branch, go to the fastapi-demo application -> branches -> main and select the Build menu.

          Build Main Branch
          Application building

2. The build pipeline for the fastapi-demo application starts.

          Branch Build Pipeline
          Pipeline building

3. Track the pipeline's status in the Tekton Dashboard by clicking the fastapi-demo-main-build-lb57m pipeline run link.

  Tekton Dashboard
          Console logs

        4. Ensure that Build Pipeline was successfully completed.

5. Create CD Pipeline. To enable application deployment, create a CD Pipeline with a single environment - Development (with the name dev).

6. Go to EDP Portal -> EDP -> CD Pipelines tab and push the + button to create a pipeline. In the Create CD Pipeline dialog, define the below values:

          • Pipeline tab:

            • Pipeline name: mypipe
            • Deployment type: Container, since we are going to deploy containers

            CD Pipeline name
            Pipeline tab with parameters

          • Applications tab. Add fastapi-demo application, select main branch, and leave Promote in pipeline unchecked:

            CD Pipeline Add Application
            Applications tab with parameters

          • Stages tab. Add the dev stage with the values below:

            • Stage name: dev
            • Description: Development Environment
            • Trigger type: Manual. We plan to deploy applications to this environment manually
            • Quality gate type: Manual
            • Step name: approve
            • Push the Apply button

            CD Pipeline Add Stage
            Stages tab with parameters

        7. Deploy the initial version of the application to the development environment:

          • Open CD Pipeline with the name mypipe.
          • Select the dev stage from the Stages tab.
  • In the Image stream version field, select version 0.0.1-SNAPSHOT.1 and push the Deploy button.

          CD Pipeline Deploy initial version
          CD Pipeline deploy

        Check the Application Status⚓︎

        To ensure the application is deployed successfully, follow the steps below:

        1. Ensure application status is Healthy and Synced, and the Deployed version points to 0.0.1-SNAPSHOT.1:

          CD Pipeline health status
          Pipeline health status

2. Check that the selected version of the container is deployed in the dev environment (${EDP_ENV} is the EDP namespace name):

          # Check the deployment status of fastapi-demo application
           $ kubectl get deployments -n ${EDP_ENV}-mypipe-dev
           NAME                 READY   UP-TO-DATE   AVAILABLE   AGE
           fastapi-demo-dl1ft   1/1     1            1           30m
          @@ -23,4 +23,4 @@
           # Check application external URL
           curl https://your-hostname-appeared-in-hosts-column-above.example.com/
           {"Hello":"World"}
          -
        \ No newline at end of file +
\ No newline at end of file
diff --git a/use-cases/autotest-as-quality-gate/index.html b/use-cases/autotest-as-quality-gate/index.html
index 9c673b99f..6aa758957 100644
--- a/use-cases/autotest-as-quality-gate/index.html
+++ b/use-cases/autotest-as-quality-gate/index.html
@@ -1 +1 @@
- Autotest as a Quality Gate - EPAM Delivery Platform

\ No newline at end of file
+ Autotest as a Quality Gate - EPAM Delivery Platform

      Autotest as a Quality Gate⚓︎

This use case describes the flow of adding an autotest as a quality gate to a newly created CD pipeline with a selected build version of an application to be promoted. The purpose of autotests is to check whether the application meets predefined criteria for stability and functionality, ensuring that only reliable versions are promoted. The promotion feature allows users to implement complex testing, thus improving application stability.

      Roles⚓︎

      This documentation is tailored for the Developers and Quality Assurance specialists.

      Goals⚓︎

      • Create several applications and autotests quickly.
      • Create a pipeline for Continuous Deployment.
      • Perform testing.
      • Update delivery by deploying the new version.

      Preconditions⚓︎

      • EDP instance is configured with Gerrit, Tekton and Argo CD.
      • Developer has access to the EDP instances using the Single-Sign-On approach.
      • Developer has the Administrator role (to perform merge in Gerrit).

      Create Applications⚓︎

      To implement autotests as Quality Gates, follow the steps below:

      1. Ensure the namespace is specified in the cluster settings. Click the Settings icon in the top right corner and select Cluster settings:

        Cluster settings
        Cluster settings

2. Enter the name of the default namespace, add the same namespace to the Allowed namespaces field, and click the + button. You can also add other namespaces to the Allowed namespaces:

        Specify namespace
        Specify namespace

      3. Create several applications using the Create strategy. Navigate to the EDP tab, choose Components, click the + button:

        Add component
        Add component

      4. Select Application and Create from template:

        Create new component menu
        Create new component menu

        Note

        Please refer to the Add Application section for details.

      5. On the Codebase info tab, define the following values and press the Proceed button:

        • Git server: gerrit
        • Git repo relative path: js-application
        • Component name: js-application
        • Description: js application
        • Application code language: JavaScript
        • Language version/Provider: Vue
        • Build tool: NPM

        Codebase info tab
        Codebase info tab

      6. On the Advanced settings tab, define the below values and push the Apply button:

        • Default branch: main
        • Codebase versioning type: default

        Advanced settings tab
        Advanced settings tab

      7. Repeat the procedure twice to create the go-application and python-application applications. These applications will have the following parameters:

        go-application:

        • Git server: gerrit
        • Git repo relative path: go-application
        • Component name: go-application
        • Description: go application
        • Application code language: Go
        • Language version/Provider: Gin
        • Build tool: Go
        • Default branch: main
        • Codebase versioning type: default

        python-application:

        • Git server: gerrit
        • Git repo relative path: python-application
        • Component name: python-application
        • Description: python application
        • Application code language: Python
        • Language version/Provider: FastAPI
        • Build tool: Python
        • Default branch: main
        • Codebase versioning type: default
8. In the Components tab, click one of the application names to enter the application menu:

        Components list
        Components list

9. Click the three dots (⋮) button and select Build:

        Application menu
        Application menu

      10. Click the down arrow (v) to observe and wait for the application to be built:

        Application building
        Application building

      11. Click the application run name to watch the building logs in Tekton:

        Tekton pipeline run
        Tekton pipeline run

12. Wait until the build is successful:

        Successful build
        Successful build

      13. Repeat steps 8-12 for the rest of the applications.

      Create Autotests⚓︎

The steps below describe how to create autotests in EDP:

      1. Create a couple of autotests using the Create strategy. Navigate to the EDP tab, choose Components, click on the + button. Select Autotest and Clone project:

        Add autotest
        Add autotest

        Note

        Please refer to the Add Autotest section for details.

      2. On the Codebase info tab, define the following values and press the Proceed button:

        • Repository URL: https://github.com/SergK/autotests.git
        • Git server: gerrit
        • Git repo relative path: demo-autotest-gradle
        • Component name: demo-autotest-gradle
        • Description: demo-autotest-gradle
        • Autotest code language: Java
        • Language version/framework: Java11
        • Build tool: Gradle
        • Autotest report framework: Allure

        Codebase info tab for autotests
        Codebase info tab for autotests

      3. On the Advanced settings tab, leave the settings as is and click the Apply button:

        Advanced settings tab for autotests
        Advanced settings tab for autotests

      4. Repeat the steps 1-3 to create one more autotest with the parameters below:

        • Repository URL: https://github.com/Rolika4/autotests.git
        • Git server: gerrit
        • Git repo relative path: demo-autotest-maven
        • Component name: demo-autotest-maven
        • Description: demo-autotest-maven
        • Autotest code language: Java
        • Language version/framework: Java11
        • Build tool: Maven
        • Autotest report framework: Allure

      Create CD Pipeline⚓︎

Now that the applications and autotests are created, create a pipeline for them by following the steps below:

      1. Navigate to the CD Pipelines tab and click the + button:

        CD pipelines tab
        CD pipelines tab

      2. On the Pipeline tab, in the Pipeline name field, enter demo-pipeline:

        Pipeline tab
        Pipeline tab

3. On the Applications tab, add all three applications, specify the main branch for each of them, and check Promote in pipeline for the Go and JavaScript applications:

        Applications tab
        Applications tab

      4. On the Stages tab, click the Add stage button to open the Create stage menu:

        Stages tab
        Stages tab

      5. In the Create stage menu, specify the following parameters and click Apply:

        • Cluster: In cluster
        • Stage name: dev
        • Description: dev
        • Trigger type: manual
        • Quality gate type: Autotests
        • Step name: dev
        • Autotest: demo-autotest-gradle
        • Autotest branch: main

        Create stage menu
        Create stage menu

      6. After the dev stage is added, click Apply:

        Create stage menu
        Create stage menu

      7. After the pipeline is created, click its name to open the pipeline details page:

        Enter pipeline
        Enter pipeline

      8. In the pipeline details page, click the Create button to create a new stage:

        Create a new stage
        Create a new stage

      9. In the Create stage menu, specify the following parameters:

        • Cluster: In cluster
        • Stage name: sit
        • Description: sit
        • Trigger type: manual
        • Quality gate type: Autotests
        • Step name: dev
        • Autotest: demo-autotest-maven
        • Autotest branch: main

      Run Autotests⚓︎

      After the CD pipeline is created, deploy applications and run autotests by following the steps below:

      1. Click the dev stage name to expand its details, specify image versions for each of the applications in the Image stream version field and click Deploy:

        Deploy applications
        Deploy applications
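
  To double-check from the cluster side that the applications reached the environment, a command along these lines can help (assuming the stage namespace follows the <edp-namespace>-<cd_pipeline_name>-<stage_name> pattern used elsewhere in this documentation, with ${EDP_ENV} as the EDP namespace):

    # All three applications should appear as deployments in the stage namespace
    $ kubectl get deployments -n ${EDP_ENV}-demo-pipeline-dev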

      2. Once applications are built, scroll down to Quality Gates and click Promote:

        Promote in pipeline
        Promote in pipeline

3. Once the promotion procedure is finished, the promoted applications become available in the Sit stage, where you can select image stream versions for them. The non-promoted application stays grey in the stage and cannot be deployed:

        Sit stage
        Sit stage

\ No newline at end of file
diff --git a/use-cases/external-secrets/index.html b/use-cases/external-secrets/index.html
index 502ff403b..5cc10a76e 100644
--- a/use-cases/external-secrets/index.html
+++ b/use-cases/external-secrets/index.html
@@ -1,4 +1,4 @@
- Secured Secrets Management for Application Deployment - EPAM Delivery Platform

        + Secured Secrets Management for Application Deployment - EPAM Delivery Platform      

        Secured Secrets Management for Application Deployment⚓︎

This Use Case demonstrates how to securely manage sensitive data, such as passwords, API keys, and other credentials, that an application consumes during development or at runtime in production. The approach involves storing sensitive data in an external secret store located in a "vault" namespace (the store itself can be Vault, AWS Secrets Manager, or any other supported provider). The process implies transmitting confidential information from the vault namespace to the deployment namespace for the purpose of establishing a connection to a database.

        Roles⚓︎

        This documentation is tailored for the Developers and Team Leads.

        Goals⚓︎

        • Make confidential information usage secure in the deployment environment.

        Preconditions⚓︎

        • EDP instance is configured with Gerrit, Tekton and Argo CD;
        • External Secrets is installed;
        • Developer has access to the EDP instances using the Single-Sign-On approach;
        • Developer has the Administrator role (to perform merge in Gerrit);
        • Developer has access to manage secrets in demo-vault namespace.

        Scenario⚓︎

To use External Secrets with the EDP approach, follow the steps below:

        Add Application⚓︎

        To begin, you will need an application first. Here are the steps to create it:

        1. Open EDP Portal URL. Use the Sign-In option:

          Logging Page
          Logging screen

2. In the top right corner, open the Cluster settings and ensure that both Default namespace and Allowed namespace are set:

          Settings
          Cluster settings

        3. Create the new Codebase with the Application type using the Create strategy. To do this, click the EDP tab:

          Cluster Overview
          Cluster overview

        4. Select the Components section under the EDP tab and push the + button:

          Components Overview
          Components tab

        5. Select the Application Codebase type because we are going to deliver our application as a container and deploy it inside the Kubernetes cluster. Select the Create strategy to use a predefined template:

          Codebase Info
          Step codebase info

        6. On the Application Info tab, define the following values and press the Proceed button:

          • Application name: es-usage
          • Default branch: master
          • Application code language: Java
          • Language version/framework: Java 17
          • Build tool: Maven

          Application Info
          Step application info

        7. On the Advanced Settings tab, define the below values and push the Apply button:

          • CI tool: Tekton
          • Codebase versioning type: default

          Application Info
          Step application info

        8. Check the application status. It should be green:

          Components overview page
          Application status

        Create CD Pipeline⚓︎

        This section outlines the process of establishing a CD pipeline within EDP Portal. There are two fundamental steps in this procedure:

        • Build the application from the last commit of the master branch;
        • Create a CD Pipeline to establish continuous delivery to the SIT environment.

        To succeed with the steps above, follow the instructions below:

        1. Create CD Pipeline. To enable application deployment, create a CD Pipeline with a single environment - System Integration Testing (SIT for short). Select the CD Pipelines section under the EDP tab and push the + button:

          CD-Pipeline Overview
          CD-Pipeline tab

        2. On the Pipeline tab, define the following values and press the Proceed button:

          • Pipeline name: deploy
          • Deployment type: Container

          Pipeline tab
          Pipeline tab

        3. On the Applications tab, add es-usage application, select master branch, leave Promote in pipeline unchecked and press the Proceed button:

          Pipeline tab
          Pipeline tab

        4. On the Stage tab, add the sit stage with the values below and push the Apply button:

          • Stage name: sit
          • Description: System integration testing
          • Trigger type: Manual. We plan to deploy applications to this environment manually
          • Quality gate type: Manual
          • Step name: approve

            Stage tab
            Stage tab

        Configure RBAC for External Secret Store⚓︎

        Note

        In this scenario, three namespaces are used: demo, which is the namespace where EDP is deployed, demo-vault, which is the vault where developers store secrets, and demo-deploy-sit, which is the namespace used for deploying the application. The target namespace for application deployment is formed using the pattern: <edp_namespace>-<cd_pipeline_name>-<stage_name> (demo-deploy-sit in this scenario).

        For the system to function properly, it is imperative to create the following resources:

        1. Create namespace demo-vault to store secrets:

           kubectl create namespace demo-vault
           
        2. Create Secret:

           apiVersion: v1
           kind: Secret
           metadata:
           ...

           After all the required resources are created, check that the ExternalSecret resource is synced into the deployment namespace (a rough sketch of the elided manifests follows this step):

           kubectl get externalsecret -n demo-deploy-sit
           NAME    STORE                          REFRESH INTERVAL   STATUS         READY
           mongo   demo                           1h                 SecretSynced   True
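           The full manifests are not shown above. As a rough sketch of the pattern, assuming the credentials live in a mongo Secret in demo-vault and are exposed through a SecretStore named demo (the store name is visible in the STORE column of the check above); key names and values are illustrative:

           # Source secret with the actual credentials, kept in the vault namespace.
           apiVersion: v1
           kind: Secret
           metadata:
             name: mongo
             namespace: demo-vault
           stringData:
             username: admin      # illustrative value
             password: changeme   # illustrative value
           ---
           # ExternalSecret in the deployment namespace: it pulls data from the
           # "demo" SecretStore and materializes a regular Secret named "mongo".
           apiVersion: external-secrets.io/v1beta1
           kind: ExternalSecret
           metadata:
             name: mongo
             namespace: demo-deploy-sit
           spec:
             refreshInterval: 1h
             secretStoreRef:
               name: demo
               kind: SecretStore
             target:
               name: mongo
             dataFrom:
               - extract:
                   key: mongo

           With the Kubernetes provider, the SecretStore also needs RBAC (a ServiceAccount, Role, and RoleBinding) that permits reading secrets in demo-vault, which is exactly what this section configures.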
        3. In the top right corner, enter the Cluster settings and add demo-deploy-sit to the Allowed namespace.

        4. Navigate to EDP Portal -> Configuration -> Secrets and ensure that the secret was created:

          Secrets
          Secrets

        5. Navigate to EDP Portal -> Workloads -> Pods and select the deployed application:

          Pod information
          Pod information


      Use Cases⚓︎

      The Use Cases section provides useful recommendations on how to operate the EPAM Delivery Platform tools and manage the custom resources. Get acquainted with the descriptions of technical scenarios and solutions.

      • Scaffold And Deploy FastAPI Application

        Rapidly create, customize, and deploy FastAPI applications using a scaffolding tool and standardized processes, streamlining development and enhancing code quality for quicker and reliable feature releases.

        Scaffold Now

      • Bring Your Own Framework

        Facilitate the onboarding of custom tools and frameworks into the EPAM Delivery Platform by integrating custom Tekton libraries, empowering the modification of pipelines and tasks for tailored workflows.

        Summon the Kraken

      • Secrets Management For Application Deployment

        Ensure secure handling of sensitive data by leveraging an external secret store within the EPAM Delivery Platform. Confidential information is transmitted and consumed securely across namespaces, enabling secure database connections during development and deployment. Tailored for Developers.

        Run Securely

      • Autotest As the Quality Gate

        Implement autotests as a quality gate within the Continuous Deployment pipeline to verify application stability and functionality, so that only reliable versions are promoted. This enables quick creation of applications, streamlined testing, and seamless deployment updates for Developers and Quality Assurance specialists.

        Set Quality Gate

        Deploy Application With Custom Build Tool/Framework⚓︎

        This Use Case describes the procedure of adding custom Tekton libraries that include pipelines with tasks. It also covers the process of modifying custom pipelines and tasks.

        Goals⚓︎

        • Add custom Tekton pipeline library;
        • Modify existing pipelines and tasks in a custom Tekton library.

        Preconditions⚓︎

        • EDP instance is configured with Gerrit and Tekton;
        • Developer has access to the EDP instance using the Single Sign-On approach;
        • Developer has the Administrator role to perform merge in Gerrit.

        Scenario⚓︎

        Note

        This case is based on our predefined repository and application. Your case may be different.

        To create and then modify a custom Tekton library, please follow the steps below:

        Add Custom Application to EDP⚓︎

        1. Open EDP Portal URL. Use the Sign-In option:

          Logging Page
          Logging screen

        2. In the top right corner, enter the Cluster settings and ensure that both Default namespace and Allowed namespace are set:

          Settings
          Cluster settings

        3. Create the new Codebase with the Application type using the Clone strategy. To do this, click the EDP tab:

          Cluster Overview
          Cluster overview

        4. Select the Components section under the EDP tab and push the create + button:

          Components Overview
          Components tab

        5. Select the Application codebase type because the application is meant to be delivered as a container and deployed inside the Kubernetes cluster. Choose the Clone strategy and this example repository:

          Codebase Info
          Step codebase info

        6. In the Application Info tab, define the following values and click the Proceed button:

          • Application name: tekton-hello-world
          • Default branch: master
          • Application code language: Other
          • Language version/framework: go
          • Build tool: shell

          Application Info
          Application info

          Note

          These application details are required to match the Pipeline name gerrit-shell-go-app-build-default.

          The PipelineRun name is formed with the help of TriggerTemplates in pipelines-library so the Pipeline name should correspond to the following structure:

            pipelineRef:
               name: gerrit-$(tt.params.buildtool)-$(tt.params.framework)-$(tt.params.cbtype)-build-$(tt.params.versioning-type)
           
          The PipelineRun is created as soon as Gerrit (or, if configured, GitHub or GitLab) sends a payload during Merge Request events. With the values above (build tool shell, framework go, application codebase type, default versioning), the template resolves to gerrit-shell-go-app-build-default.
        7. In the Advanced Settings tab, define the below values and click the Apply button:

          • CI tool: Tekton
          • Codebase versioning type: default
          • Leave Specify the pattern to validate a commit message empty.

          Advanced Settings
          Advanced settings

        8. Check the application status. It should be green:

          Components overview page
          Application status

          Now that the application is created successfully, proceed to adding the Tekton library.

        Add Tekton Library⚓︎

        1. Select the Components section under the EDP tab and push the create + button:

          Components Overview
          Components tab

        2. Create a new Codebase with the Library type using the Create strategy:

          Codebase Info
          Step codebase info

          Note

          The EDP Create strategy will automatically pull the code for the Tekton Helm application from here.

        3. In the Application Info tab, define the following values and click the Proceed button:

          • Application name: custom-tekton-chart
          • Default branch: master
          • Application code language: Helm
          • Language version/framework: Pipeline
          • Build tool: Helm

          Codebase Info
          Step codebase info

        4. In the Advanced Settings tab, define the below values and click the Apply button:

          • CI tool: Tekton
          • Codebase versioning type: default
          • Leave Specify the pattern to validate a commit message empty.

          Advanced Settings
          Advanced settings

        5. Check the codebase status:

          Components overview page
          Codebase status

        Modify Tekton Pipeline⚓︎

        Note

        Our recommendation is to avoid modifying the default Tekton resources. Instead, we suggest creating and modifying your own custom Tekton library.

        Now that the Tekton Helm library is created, it is time to clone, modify and then apply it to the Kubernetes cluster.

        1. Generate SSH key to work with Gerrit repositories:

          ssh-keygen -t ed25519 -C "your_email@example.com"
           
        2. Log into Gerrit UI.

        3. Go to Gerrit Settings -> SSH keys, paste your generated public SSH key to the New SSH key field and click ADD NEW SSH KEY:

          Gerrit settings
          Gerrit settings
          Gerrit settings
          Gerrit settings

        4. Browse Gerrit Repositories and select custom-tekton-chart project:

          Browse Gerrit repositories
          Browse Gerrit repositories

        5. Clone the repository with SSH using Clone with commit-msg hook command:

          Gerrit clone
          Gerrit clone

          Note

          If strict firewall configurations block SSH, use the HTTP protocol to pull and configure the HTTP Credentials in Gerrit.
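
          For reference, the Clone with commit-msg hook command copied from the Gerrit UI typically looks like the sketch below (the username and host are placeholders for your Gerrit instance; 29418 is Gerrit's default SSH port):

          git clone "ssh://<username>@<gerrit-host>:29418/custom-tekton-chart" && \
            scp -p -P 29418 <username>@<gerrit-host>:hooks/commit-msg "custom-tekton-chart/.git/hooks/"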

        6. Examine the repository structure. It should look this way by default:

          custom-tekton-chart
          ...

          After modifying the pipelines, commit and push the changes for review:

           git commit -m "Add Helm chart testing for go-shell application"
           git push origin HEAD:refs/for/master
        7. Check the Gerrit code review for the custom Helm chart pipelines repository in Tekton:

          Gerrit code review status
          Gerrit code review status

        8. Go to Changes -> Open, click CODE-REVIEW and submit the merge request:

          Gerrit merge
          Gerrit merge
          Gerrit merge
          Gerrit merge

        9. Check the build Pipeline status for the custom Pipelines Helm chart repository in Tekton:

          Tekton status
          Tekton status

        Create Application Merge Request⚓︎

        Since we applied the Tekton library to the Kubernetes cluster in the previous step, let's test the review and build pipelines for our tekton-hello-world application.

        Perform the below steps to merge new code (Merge Request) that passes the Code Review flow. For the steps below, we use Gerrit UI but the same actions can be performed using the command line and Git tool:

        1. Log into Gerrit UI, select tekton-hello-world project, and create a change request.

        2. Browse Gerrit Repositories and select tekton-hello-world project:

          Browse Gerrit repositories
          Browse Gerrit repositories

        3. Clone the tekton-hello-world repository to make the necessary changes or click the Create Change button in the Commands section of the project to make changes via Gerrit GUI:

          Create Change request
          Create Change request

        4. In the Create Change dialog, provide the branch master, write some text in the Description (commit message) and click the Create button:

          Create Change
          Create Change

        5. Click the Edit button of the merge request, open deployment-templates/values.yaml, and change the ingress.enabled flag from false to true:

          Update values.yaml file
          Update values.yaml file
          Update values.yaml file
          Update values.yaml file

        6. Check the Review Pipeline status. The helm-lint pipeline task should be displayed there:

          Review Change
          Review Change

        7. Review the deployment-templates/values.yaml file and push the SAVE & PUBLISH button. As soon as you get Verified +1 from the CI bot, the change is ready for review. Click the Mark as Active and Code-review buttons:

          Review Change
          Review Change

        8. Click the Submit button. Then, your code is merged to the main branch, triggering the Build Pipeline.

          Review Change
          Review Change

          Note

          If push steps are added and configured in the Build pipeline, it will produce a new artifact version, which will be available for deployment in EDP Portal.

        9. Check the pipelines in the Tekton dashboard:

          Tekton custom pipelines
          Tekton custom pipelines
          Tekton custom pipelines
          Tekton custom pipelines

        What happens under the hood:
        1) Gerrit sends a payload to the Tekton EventListener during a Merge Request event;
        2) The EventListener catches it with the help of an Interceptor;
        3) The TriggerTemplate creates a PipelineRun.

        The detailed scheme is shown below:

        graph LR;
            A[Gerrit events] --> |Payload| B(Tekton EventListener) --> C(Tekton Interceptor CEL filter) --> D(TriggerTemplate) --> E(PipelineRun)

        This chart uses the core of common-library and pipelines-library with custom resources on top of them.
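
        To illustrate the trigger chain, a trimmed-down EventListener might look like the sketch below (the resource names and the CEL filter are illustrative, not the exact resources shipped with EDP):

        apiVersion: triggers.tekton.dev/v1beta1
        kind: EventListener
        metadata:
          name: gerrit-listener               # illustrative name
        spec:
          triggers:
            - name: gerrit-merge
              interceptors:
                - ref:
                    name: cel                 # CEL Interceptor filters the Gerrit payload
                  params:
                    - name: filter
                      value: "body.project == 'tekton-hello-world'"   # illustrative filter
              bindings:
                - ref: gerrit-binding         # extracts parameters from the payload
              template:
                ref: gerrit-build-template    # TriggerTemplate that creates the PipelineRun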


      Add Application⚓︎

      KubeRocketCI portal allows you to create an application, clone an existing repository with the application to your Version Control System (VCS), or import an application from an external repository into the environment. When an application is created or cloned, the system automatically generates a corresponding repository within the integrated Version Control System. You can create an Application in YAML or via the two-step menu in the dialog.

      To add an application, navigate to the Components section on the navigation bar and click + Create component:

      Create new application
      Create new application

      Once clicked, the Create new component dialog will appear, then select Application and click Next:

      Application info
      Application info

      Choose one of the strategies and click Create:

      Select strategy
      Select strategy

      • Create from template – creates a project from a template in accordance with the application language, build tool, and framework. This strategy is recommended for projects that start developing their applications from scratch.
      • Import project - allows using an existing VCS repository to integrate with KubeRocketCI. While importing the existing repository, select the Git server from the drop-down list and define the relative path to the repository, such as epmd-edp/python-python-flask.

        Note

        In order to use the Import project strategy, make sure to complete the setup described on the Integrate GitLab/GitHub in Tekton page.

      • Clone project – clones the indicated repository into KubeRocketCI. While cloning the existing repository, it is required to fill in the Repository URL field and specify the credentials if needed:

        Clone application
        Clone application

      Create Application in YAML ⚓︎

      Click Edit YAML in the upper-right corner of the Create Application dialog to open the YAML editor and create the Application.

      Edit YAML
      Edit YAML

      To edit YAML in the minimal editor, turn on the Use minimal editor toggle in the upper-right corner of the Create Application dialog.

      To save the changes, select the Save & Apply button.
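
      For reference, the YAML behind an application is a Codebase custom resource. A minimal sketch might look as follows (field values are illustrative, and the API group/version is an assumption based on the EDP codebase-operator; consult the CRD in your installation for the authoritative schema):

      apiVersion: v2.edp.epam.com/v1
      kind: Codebase
      metadata:
        name: my-app                # illustrative name
        namespace: edp
      spec:
        type: application
        strategy: create
        gitServer: gerrit
        gitUrlPath: /my-app
        defaultBranch: main
        lang: java
        framework: java17
        buildTool: maven
        ciTool: tekton
        emptyProject: false
        versioning:
          type: default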

      Create Application via UI ⚓︎

      The Create Application dialog contains two steps:

      • The Codebase Info Menu
      • The Advanced Settings Menu

      Codebase Info Menu⚓︎

      Follow the instructions below to fill in the fields of the Codebase Info menu:

      In our example, we will use the Create from template strategy:

      Create application
      Create application

      1. Select all the settings that define how the application will be added to Git server:

        • Git server - the pre-configured server where the component will be hosted. Select one from the drop-down list. Please refer to the Manage Git Servers page to learn how to create one.
        • Repository name - the relative path to the repository, such as epmd-edp/python-python-flask.
        • Component name - the name of the application. Must be at least two characters using lower-case letters, numbers, and inner dashes.
        • Description - a brief and concise description that explains the purpose of the application.
        • Empty project - check this box to create an application with an empty repository. The empty repository option is available only for the Create from template strategy.
      2. Specify the application language properties:

        • Application Code Language - defines the code language with its supported frameworks:

          • Java – allows selecting a specific Java version (8, 11, and 17 are available).
          • JavaScript - selecting JavaScript allows using React, Vue, Angular, Express, Next.js and Antora frameworks.
          • Python - selecting Python allows using the Python v.3.8, FastAPI, Flask frameworks.
          • Go - selecting Go allows using the Beego, Gin and Operator SDK frameworks.
          • C# - selecting C# allows using the .Net v.3.1 and .Net v.6.0 frameworks.
          • Helm - selecting Helm allows using the Helm framework.
          • Other - selecting Other allows extending the default code languages when creating a codebase with the clone/import strategy.

          Note

          The Create from template strategy does not allow customizing the default code language set.

        • Language version/framework - defines the specific framework or language version of the application. The field depends on the selected code language.
        • Select Build Tool - allows choosing the build tool to use. The set of tools changes in accordance with the selected code language.

          • Java - selecting Java allows using the Gradle or Maven tool.
          • JavaScript - selecting JavaScript allows using the NPM tool.
          • C# - selecting C# allows using the .Net tool.
          • Python - selecting Python allows using the Python tool.
          • Go - selecting Go allows using the Go tool.
          • Helm - selecting Helm allows using the Helm tool.

          Note

          The Select Build Tool field offers the default tools and changes in accordance with the selected code language.

          Note

          Tekton pipelines offer built-in support for Java Maven Multi-Module projects. These pipelines are capable of recognizing Java deployable modules based on the information in the pom.xml file and performing relevant deployment actions. It's important to note that although the Dockerfile is typically located in the root directory, Kaniko, the tool used for building container images, uses the targets folder within the deployable module's context. For a clear illustration of a Multi-Module project structure, please refer to this example on GitHub, which showcases a commonly used structure for Java Maven Multi-Module projects.

      Advanced Settings Menu⚓︎

      In the Advanced Settings menu, specify the branch options and define the Jira settings:

      Advanced settings
      Advanced settings

      • Default branch - the name of the branch where you want the development to be performed.

        Note

        The default branch cannot be deleted.

      • Codebase versioning type - defines how the application tag will be changed once the new image version is built. There are two versioning types:
        • default: Using the default versioning type, in order to specify the version of the current artifacts, images, and tags in the Version Control System, a developer should navigate to the corresponding file and change the version manually.
        • edp: Using the edp versioning type, a developer indicates the version number from which all the artifacts will be versioned and, as a result, automatically registered in the corresponding file (e.g. pom.xml). When selecting the edp versioning type, extra fields will appear; type the version number from which you want the artifacts to be versioned:

          Edp versioning
          Edp versioning

          Note

          The Start Version From field should be filled out in compliance with the semantic versioning rules, e.g. 1.2.3 or 10.10.10. Please refer to the Semantic Versioning page for details.

      • Specify the pattern to validate a commit message - the regular expression used to indicate the pattern that is followed on the project to validate a commit message in the code review pipeline. An example of the pattern: ^\[PROJECT_NAME-\d{4}\]:.*$ (matching, for instance, [PROJECT_NAME-0001]: Initial commit).

        JIRA integration
        JIRA integration

      • Integrate with Jira server - select this check box if it is required to connect Jira tickets with the commits and have a respective label in the Fix Version field.

      Note

      To adjust the Jira integration functionality, first apply the necessary changes described on the Adjust Jira Integration page, and Adjust VCS Integration With Jira.

      • Jira Server - the integrated Jira server with related Jira tasks.
      • Specify the pattern to find a Jira ticket number in a commit message - based on this pattern, the value from KubeRocketCI will be displayed in Jira.

        Mapping field name
        Mapping fields

      • Mapping field name - the section where you specify the names of the additional Jira fields that should be filled in with attributes from KubeRocketCI:

        • Select the name of the field in a Jira ticket. The available fields are the following: Fix Version/s, Component/s and Labels.
        • Click the Add button to add the mapping field name.
        • Enter Jira pattern for the field name:

          • For the Fix Version/s field, select the EDP_VERSION variable that represents an EDP upgrade version, as in 2.7.0-SNAPSHOT. Combine variables to make the value more informative. For example, the pattern EDP_VERSION-EDP_COMPONENT will be displayed as 2.7.0-SNAPSHOT-nexus-operator in Jira.
          • For the Component/s field, select the EDP_COMPONENT variable that defines the name of the existing repository. For example, nexus-operator.
          • For the Labels field, select the EDP_GITTAG variable that defines a tag assigned to the commit in GitHub. For example, build/2.7.0-SNAPSHOT.59.
        • Click the bin icon to remove the Jira field name.

      Click the Apply button to add the application to the Components list.

      Note

      After the application is added, inspect the Manage Applications page to learn how you can operate applications.


      Add Autotest⚓︎

      KubeRocketCI portal allows you to clone an existing repository with the autotest to your Version Control System (VCS), or import an autotest from an external repository for further running in stages or using it as a quality gate for applications. When an autotest is cloned, the system automatically generates a corresponding repository within the integrated VCS. You can create an autotest in YAML or via the two-step menu in the dialog.

      Info

      Please refer to the Add Application section for the details on how to add an application codebase type. For the details on how to use autotests as quality gates, please refer to the Stages Menu section of the Add Environment documentation.

      To add an autotest, navigate to the Components section on the navigation bar and click + Create component:

      Create new autotest
      Create new autotest

      Once clicked, the Create new component dialog will appear, then select Autotest and click Next:

      Create new autotest
      Create new autotest

      Choose one of the strategies and click Create:

      Select strategy
      Select strategy

      • Clone project – clones the indicated repository into KubeRocketCI. While cloning the existing repository, it is required to fill in the Repository URL field and specify the credentials if needed.
      • Import project - allows using an existing VCS repository to integrate with KubeRocketCI. While importing the existing repository, select the Git server from the drop-down list and define the relative path to the repository, such as /epmd-edp/examples/basic/edp-auto-tests-simple-example.

        Note

        In order to use the Import project strategy, make sure to complete the setup described on the Integrate GitLab/GitHub With Tekton page.

      Create Autotest in YAML ⚓︎

      Click Edit YAML in the upper-right corner of the Create Autotest dialog to open the YAML editor and create an autotest:

      Edit YAML
      Edit YAML

      To edit YAML in the minimal editor, turn on the Use minimal editor toggle in the upper-right corner of the Create Autotest dialog.

      To save the changes, select the Save & Apply button.
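
      As with applications, an autotest is backed by a Codebase custom resource. A minimal sketch might look as follows (field values are illustrative and assume the same Codebase CRD as for applications; the report framework field name is an assumption):

      apiVersion: v2.edp.epam.com/v1
      kind: Codebase
      metadata:
        name: my-autotest           # illustrative name
        namespace: edp
      spec:
        type: autotest
        strategy: clone
        repository:
          url: https://git.example.com/edp-auto-tests-simple-example.git   # illustrative URL
        gitServer: gerrit
        gitUrlPath: /my-autotest
        defaultBranch: master
        lang: java
        framework: java11
        buildTool: maven
        testReportFramework: allure   # assumption: Allure is the default report framework
        ciTool: tekton
        versioning:
          type: default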

      Create Autotest via UI ⚓︎

      The Create Autotest dialog contains two steps:

      • The Codebase Info Menu
      • The Advanced Settings Menu

      The Codebase Info Menu⚓︎

      In our case, we will use the Clone strategy:

      Clone autotest
      Clone autotest

      1. Select all the settings that define how the autotest will be added to Git server:

        • Git server - the pre-configured server where the component will be hosted. Select one from the drop-down list. Please refer to the Manage Git Servers page to learn how to create one.
        • Repository name - the relative path to the repository, such as /epmd-edp/examples/basic/edp-auto-tests-simple-example.
        • Component name - the name of the autotest. Must be at least two characters using lower-case letters, numbers, and inner dashes.
        • Description - a brief and concise description that explains the purpose of the autotest.
      2. Specify the autotest language properties:

        • Autotest code language - defines the code language with its supported frameworks. Selecting Other allows extending the default code languages and getting the necessary build tool.
        • Language version/framework - defines the specific framework or language version of the autotest. The field depends on the selected code language. Specify Java 8, Java 11, or Java 17 to be used.
        • Build Tool - allows choosing the build tool to use. For autotests, Gradle and Maven are available.
        • Autotest report framework - all the autotest reports will be created in the Allure framework by default.

      Click the Proceed button to switch to the next menu.

      The Advanced Settings Menu⚓︎

      In the Advanced Settings menu, specify the branch options and define the Jira settings:

      Advanced settings
      Advanced settings

      • Default branch - the name of the branch where you want the development to be performed.

        Note

        The default branch cannot be deleted.

      • Codebase versioning type - defines how the autotest tag will be changed once the new image version is built. There are two versioning types:

        • default: Using the default versioning type, in order to specify the version of the current artifacts, images, and tags in the Version Control System, a developer should navigate to the corresponding file and change the version manually.
        • edp: Using the edp versioning type, a developer indicates the version number from which all the artifacts will be versioned and, as a result, automatically registered in the corresponding file (e.g. pom.xml). When selecting the edp versioning type, extra fields will appear; type the version number from which you want the artifacts to be versioned:

        Edp versioning
        Edp versioning

        Type the version number from which you want the artifacts to be versioned.

      Note

      The Start Version From field must be filled out in compliance with the semantic versioning rules, e.g. 1.2.3 or 10.10.10. Please refer to the Semantic Versioning page for details.

      • Specify the pattern to validate a commit message - the regular expression used to indicate the pattern that is followed on the project to validate a commit message in the code review pipeline. An example of the pattern: ^\[PROJECT_NAME-\d{4}\]:.*$.

        Jira integration
        Jira integration

      • Integrate with Jira server - select this check box if it is required to connect Jira tickets with the commits and have a respective label in the Fix Version field.

      Note

      To adjust the Jira integration functionality, first apply the necessary changes described on the Adjust Jira Integration page, and Adjust VCS Integration With Jira.

      • Jira Server - the integrated Jira server with related Jira tasks.
      • Specify the pattern to find a Jira ticket number in a commit message - based on this pattern, the value from KubeRocketCI will be displayed in Jira.

        Mapping field name
        Mapping field name

      • Mapping field name - the section where you specify the names of the additional Jira fields that should be filled in with attributes from KubeRocketCI:

        • Select the name of the field in a Jira ticket. The available fields are the following: Fix Version/s, Component/s and Labels.
        • Click the Add button to add the mapping field name.
        • Enter Jira pattern for the field name:

          • For the Fix Version/s field, select the EDP_VERSION variable that represents a KubeRocketCI upgrade version, as in 2.7.0-SNAPSHOT. Combine variables to make the value more informative. For example, the pattern EDP_VERSION-EDP_COMPONENT will be displayed as 2.7.0-SNAPSHOT-nexus-operator in Jira.
          • For the Component/s field, select the EDP_COMPONENT variable that defines the name of the existing repository. For example, nexus-operator.
          • For the Labels field, select the EDP_GITTAG variable that defines a tag assigned to the commit in GitHub. For example, build/2.7.0-SNAPSHOT.59.
        • Click the bin icon to remove the Jira field name.

      After the autotest is added, inspect the Autotest Overview page to learn how you can operate autotests.


      Add Environment⚓︎

      The portal provides the ability to deploy an environment on your own and specify the essential components.

      Navigate to the Environments section on the navigation bar and click Create (the plus sign icon on the right side of the screen). Once clicked, the Create CD Pipeline dialog will appear.

      Creating an environment becomes available as soon as an application is created, provisioned in a branch, and supplied with the necessary entities for the environment. You can create the environment in YAML or via the three-step menu in the dialog.

      Create Environment in YAML ⚓︎

      Click Edit YAML in the upper-right corner of the Create CD Pipeline dialog to open the YAML editor and create the environment.

      Edit YAML
      Edit YAML

      To edit YAML in the minimal editor, turn on the Use minimal editor toggle in the upper-right corner of the Create CD Pipeline dialog.

      To save the changes, select the Save & Apply button.
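
      For reference, an environment created through the YAML editor is backed by a CDPipeline custom resource. A minimal sketch might look like this (field values are illustrative, and the API group/version is an assumption based on the EDP cd-pipeline-operator):

      apiVersion: v2.edp.epam.com/v1
      kind: CDPipeline
      metadata:
        name: deploy                  # illustrative environment name
        namespace: edp
      spec:
        deploymentType: container
        applications:
          - my-app
        applicationsToPromote: []     # applications with "Promote in pipeline" checked
        inputDockerStreams:
          - my-app-main               # codebase image stream of the selected branch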

      Create Environment in the Dialog ⚓︎

      The Create CD Pipeline dialog contains three steps:

      • The Pipeline Menu
      • The Applications Menu
      • The Stages Menu

      The Pipeline Menu⚓︎

      Before proceeding, make sure to familiarize yourself with the Manage GitOps page, as a GitOps repository might need to be added before creating an environment:

      Add GitOps repository
      Add GitOps repository

      To create an environment, follow the steps below:

      1. Navigate to the Environments tab and click the + Create Environment button:

        Environments menu
        Environments menu

      2. The Pipeline tab of the Create CD Pipeline menu is presented below:

        Create CD pipeline
        Create CD pipeline

        1. Enter the Environment name that will be displayed in the Environments list.

        2. Click the Proceed button to move onto the Applications tab.

      3. Type the name of the pipeline in the Pipeline Name field by entering at least two characters and by using the lower-case letters, numbers and inner dashes.

        Note

        The namespace created by the environment has the following pattern combination: [kuberocketci namespace]-[environment name]-[stage name]. Please be aware that the namespace length should not exceed 63 symbols.

      4. Select the deployment type from the drop-down list:

        • Container - the pipeline will be deployed in a Docker container;
        • Custom - this mode allows to deploy non-container applications and customize the Init stage of environment.
      5. Click the Proceed button to switch to the next menu.

      The Applications Menu⚓︎

      The Pipeline tab of the Create CD Pipeline menu is presented below:

      Environment applications
      Environment applications

      1. Select the necessary application from the Mapping field name drop-down menu and click Add.
      2. Specify the application parameters:

        • Branch - Select the application branch from the drop-down menu.
        • Promote in pipeline - Select the this check box in order to transfer the application from one to another stage by the specified codebase Docker branch. If the Promote in pipeline check box is not selected, the same codebase Docker stream will be deployed regardless of the stage, i.e. the codebase Docker stream input, which was selected for the pipeline, will always be used.

        Note

        If there is another deployed environment stage with the respective codebase Docker stream (= image stream as an OpenShift term), the pattern combination will be as follows: [pipeline name]-[stage name]-[application name]-[verified].

      3. Click the Proceed button to switch to the next menu.

      The Stages Menu⚓︎

      Stages are created the following way:

      1. On the Stages menu, click the Add Stage button and fill in the necessary fields in the Adding Stage window :

        CD stages
        CD stages

        Adding stage
        Adding stage

        1. Set the proper cluster options:

          • Cluster - Choose the cluster to deploy the stage in;
          • Stage name - Enter the stage name;
          • Namespace - Specify the Kubernetes namespace where the resources will be deployed in. By default, this field is pre-populated automatically but keep in mind that the namespace name must be no longer than 63 symbols;
          • Description - Enter the description for this stage;
          • Trigger type - Select the trigger type. The key benefit of the automatic deploy feature is to keep environments up-to-date. The available trigger types are Manual and Auto. When the Auto trigger type is chosen, the environment will initiate automatically once the image is built. Manual implies that user has to perform deploy manually by clicking the Deploy button in the environment menu. Please refer to the Architecture Scheme of CD Pipeline Operator page for additional details.

            Note

            Automatic deploy will start working only after the first manual deploy.

          • Pipeline template - Choose a predefined blueprint outlining the deployment process for your application. While you have the option to incorporate custom deployment templates by generating a resource of the PipelineTemplate category, you can also opt for one of the preexisting options: with autotests or without.
        2. Select the quality gate options:

          • Quality gate type - Select the quality gate type:
            • Manual - means that the promoting process should be confirmed in Tekton manually;
            • Autotests - means that the promoting process should be confirmed by the successful passing of the autotests.;
          • Step name - Type the step name, which will be displayed in Tekton, for every quality gate;
          • Autotest - Select the previously created autotest name;
          • Autotest branch - Specify a branch for the autotest.

          Note

          Execution sequence. The image promotion and execution of the pipelines depend on the sequence in which the environments are added.

        3. Click the Apply button to display the stage in the Stages menu.

        Continuous delivery menu
        Continuous delivery menu

      2. Click the Apply button to start the provisioning of the pipeline.

      As a result, a new environment will be created in the environments list.

      \ No newline at end of file + Add Environment - EPAM Delivery Platform

      Add Environment⚓︎

The Portal provides the ability to deploy an environment on your own and specify its essential components.

      Navigate to the Environments section on the navigation bar and click Create (the plus sign icon on the right side of the screen). Once clicked, the Create CD Pipeline dialog will appear.

Creating an environment becomes available as soon as an application is created, including its provisioning in a branch and the necessary entities for the environment. You can create the environment in YAML or via the three-step menu in the dialog.

      Create Environment in YAML ⚓︎

      Click Edit YAML in the upper-right corner of the Create CD Pipeline dialog to open the YAML editor and create the environment.

Edit YAML

      To edit YAML in the minimal editor, turn on the Use minimal editor toggle in the upper-right corner of the Create CD Pipeline dialog.

      To save the changes, select the Save & Apply button.
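For illustration, an environment expressed in YAML might look like the following CDPipeline and Stage resources (a minimal sketch based on cd-pipeline-operator conventions; all names are hypothetical and the exact field set may differ between platform versions):

apiVersion: v2.edp.epam.com/v1
kind: CDPipeline
metadata:
  name: my-env                        # environment name
spec:
  deploymentType: container
  applications:
    - my-app
  applicationsToPromote:
    - my-app                          # equivalent of the Promote in pipeline check box
  inputDockerStreams:
    - my-app-main                     # image stream of the selected application branch
---
apiVersion: v2.edp.epam.com/v1
kind: Stage
metadata:
  name: my-env-dev
spec:
  cdPipeline: my-env
  clusterName: in-cluster
  namespace: krci-my-env-dev          # [kuberocketci namespace]-[environment name]-[stage name]
  triggerType: Manual
  qualityGates:
    - qualityGateType: manual
      stepName: approve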

      Create Environment in the Dialog ⚓︎

The Create CD Pipeline dialog consists of three steps:

      • The Pipeline Menu
      • The Applications Menu
      • The Stages Menu

      The Pipeline Menu⚓︎

Before proceeding, familiarize yourself with the Manage GitOps page, since you may need to add a GitOps repository before creating an environment:

Add GitOps repository

      To create an environment, follow the steps below:

      1. Navigate to the Environments tab and click the + Create Environment button:

Environments menu

      2. The Pipeline tab of the Create CD Pipeline menu is presented below:

Create CD pipeline

        1. Enter the Environment name that will be displayed in the Environments list.

        2. Click the Proceed button to move onto the Applications tab.

3. Type the pipeline name in the Pipeline Name field. It must be at least two characters long and may contain only lower-case letters, numbers, and inner dashes.

        Note

The namespace created by the environment has the following pattern combination: [kuberocketci namespace]-[environment name]-[stage name]. For example, the platform namespace krci with the environment my-env and the stage dev results in krci-my-env-dev. Please be aware that the namespace length should not exceed 63 symbols.

      4. Select the deployment type from the drop-down list:

        • Container - the pipeline will be deployed in a Docker container;
• Custom - this mode allows deploying non-container applications and customizing the Init stage of the environment.
      5. Click the Proceed button to switch to the next menu.

      The Applications Menu⚓︎

The Applications tab of the Create CD Pipeline menu is presented below:

Environment applications

1. Select the necessary application from the drop-down menu and click Add.
      2. Specify the application parameters:

        • Branch - Select the application branch from the drop-down menu.
• Promote in pipeline - Select this check box to transfer the application from one stage to another by the specified codebase Docker branch. If the Promote in pipeline check box is not selected, the same codebase Docker stream will be deployed regardless of the stage, i.e. the codebase Docker stream input selected for the pipeline will always be used.

        Note

        If there is another deployed environment stage with the respective codebase Docker stream (= image stream as an OpenShift term), the pattern combination will be as follows: [pipeline name]-[stage name]-[application name]-[verified].

      3. Click the Proceed button to switch to the next menu.

      The Stages Menu⚓︎

Stages are created in the following way:

1. On the Stages menu, click the Add Stage button and fill in the necessary fields in the Adding Stage window:

CD stages

Adding stage

        1. Set the proper cluster options:

          • Cluster - Choose the cluster to deploy the stage in;
          • Stage name - Enter the stage name;
• Namespace - Specify the Kubernetes namespace where the resources will be deployed. By default, this field is pre-populated automatically, but keep in mind that the namespace name must be no longer than 63 symbols;
          • Description - Enter the description for this stage;
• Trigger type - Select the trigger type. The key benefit of the automatic deploy feature is to keep environments up-to-date. The available trigger types are Manual and Auto. When the Auto trigger type is chosen, the environment will initiate deployment automatically once the image is built. Manual implies that the user has to deploy manually by clicking the Deploy button in the environment menu. Please refer to the Architecture Scheme of CD Pipeline Operator page for additional details.

            Note

            Automatic deploy will start working only after the first manual deploy.

          • Pipeline template - Choose a predefined blueprint outlining the deployment process for your application. While you have the option to incorporate custom deployment templates by generating a resource of the PipelineTemplate category, you can also opt for one of the preexisting options: with autotests or without.
        2. Select the quality gate options:

          • Quality gate type - Select the quality gate type:
            • Manual - means that the promoting process should be confirmed in Tekton manually;
• Autotests - means that the promoting process should be confirmed by the successful passing of the autotests;
          • Step name - Type the step name, which will be displayed in Tekton, for every quality gate;
          • Autotest - Select the previously created autotest name;
          • Autotest branch - Specify a branch for the autotest.

          Note

          Execution sequence. The image promotion and execution of the pipelines depend on the sequence in which the environments are added.

        3. Click the Apply button to display the stage in the Stages menu.

Continuous delivery menu

      2. Click the Apply button to start the provisioning of the pipeline.

      As a result, a new environment will be created in the environments list.

diff --git a/user-guide/add-cluster/index.html b/user-guide/add-cluster/index.html

Add Cluster - EPAM Delivery Platform

      Add Cluster⚓︎

      This page provides comprehensive instructions on how to integrate a new cluster into the KubeRocketCI workloads. By doing so, it creates an opportunity for users to employ multi-cluster deployment, thereby facilitating the segregation of different environments across various clusters.

      Prerequisites⚓︎

Before moving ahead, ensure you have already followed the guidelines outlined in the Argo CD Integration page.

      Deploy to Remote Cluster⚓︎

      To deploy an application to a remote cluster, follow the steps below:

      1. Navigate to KubeRocketCI portal -> Configuration -> Clusters and click the + Add cluster button:

Clusters menu

      2. In the drop-down window, specify the required fields:

        • Cluster Name - a unique and descriptive name for the new cluster;
        • Cluster Host - the cluster’s endpoint URL (e.g., example-cluster-domain.com);
        • Cluster Token - a Kubernetes token with permissions to access the cluster. This token is required for proper authorization;
• Skip TLS verification - allows connecting to the cluster without certificate verification;
        • Cluster Certificate - a Kubernetes certificate essential for authentication. Obtain this certificate from the configuration file of the user account you intend to use for accessing the cluster.

        Note

        The Cluster Certificate field is hidden if the skip TLS verification option is enabled.

Add cluster

      3. Click the Apply button to add the cluster.

      As a result, the Kubernetes secret will be created for further integration and you will be able to select the integrated cluster when creating a new stage:

Select cluster
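Under the hood, such a secret typically follows the Argo CD declarative cluster-secret format (an illustrative sketch; the secret name and all values are assumptions):

apiVersion: v1
kind: Secret
metadata:
  name: my-remote-cluster             # hypothetical cluster name
  labels:
    argocd.argoproj.io/secret-type: cluster
type: Opaque
stringData:
  name: my-remote-cluster
  server: https://example-cluster-domain.com
  config: |
    {
      "bearerToken": "<cluster token>",
      "tlsClientConfig": {
        "insecure": false,
        "caData": "<base64-encoded cluster certificate>"
      }
    }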

diff --git a/user-guide/add-git-server/index.html b/user-guide/add-git-server/index.html

Add Git Server - EPAM Delivery Platform

      Add Git Server⚓︎

To add a Git server, navigate to the Git servers section on the navigation bar and click Create (the plus sign icon in the lower-right corner of the screen). Once clicked, the Create Git server dialog will appear. You can create a Git server in YAML or via the dialog.

      Create Git Server in YAML ⚓︎

      Click Edit YAML in the upper-right corner of the Create Git server dialog to open the YAML editor and create a Git server.

Edit YAML

      To edit YAML in the minimal editor, turn on the Use minimal editor toggle in the upper-right corner of the Create Git server dialog.

      To save the changes, select the Save & Apply button.

      Create Git Server in the Dialog ⚓︎

      Fill in the following fields:

Create Git server

      • Git provider - select Gerrit, GitLab or GitHub.
      • Host - enter a Git server endpoint.
      • User - enter a user for Git integration.
      • SSH port - enter a Git SSH port.
      • HTTPS port - enter a Git HTTPS port.
      • Private SSH key - enter a private SSH key for Git integration.
• Access token - enter an access token for Git integration. To generate this token, go to your GitHub account -> Settings -> Developer settings -> Personal access tokens, or your GitLab account -> Preferences -> Access Tokens.

      Click the Apply button to add the Git server to the Git servers list. As a result, the Git Server object and the corresponding secret for further integration will be created.
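For reference, the resulting Git Server object might look like the following (a minimal sketch based on codebase-operator conventions; the host, ports, and secret name are assumptions):

apiVersion: v2.edp.epam.com/v1
kind: GitServer
metadata:
  name: github
spec:
  gitProvider: github
  gitHost: github.com
  gitUser: git
  httpsPort: 443
  sshPort: 22
  nameSshKeySecret: ci-github         # hypothetical secret holding the SSH key and access token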

diff --git a/user-guide/add-infrastructure/index.html b/user-guide/add-infrastructure/index.html

Add Infrastructure - EPAM Delivery Platform

      Add Infrastructure⚓︎

KubeRocketCI portal allows you to create an application, clone an existing repository with the application to your Version Control System (VCS), or use an external repository and import an application to the environment. When an application is created or cloned, the system automatically generates a corresponding repository within the integrated Version Control System. The functionality of the Infrastructure codebase type is to create resources in a cloud provider. You can create an infrastructure in YAML or via the two-step menu in the dialog.

      To add an infrastructure, navigate to the Components section on the navigation bar and click + Create component:

Create new infrastructure

      Once clicked, the Create new component dialog will appear. Select Infrastructure and click Next:

Infrastructure info

      Choose one of the strategies and click Create:

Select strategy

      In the Create new component menu, select the necessary configuration strategy. The choice will define the parameters you will need to specify:

• Create from template – creates a project from a template in accordance with an infrastructure language, a build tool, and a framework.
• Import project - allows using an existing VCS repository to integrate with KubeRocketCI. While importing the existing repository, select the Git server from the drop-down list and define the relative path to the repository, such as epmd-edp/python-python-flask.

      Note

To use the Import project strategy, make sure to adjust it as described on the Integrate GitLab/GitHub With Tekton page.

      • Clone project – clones the indicated repository into KubeRocketCI. While cloning the existing repository, it is required to fill in the Repository URL field and specify the Repository credentials field if needed:

Clone infrastructure

      Create Infrastructure in YAML ⚓︎

      Click Edit YAML in the upper-right corner of the Create Infrastructure dialog to open the YAML editor and create the Infrastructure.

Edit YAML

      To edit YAML in the minimal editor, turn on the Use minimal editor toggle in the upper-right corner of the Create Infrastructure dialog.

      To save the changes, select the Save & Apply button.

      Create Infrastructure via UI ⚓︎

The Create Infrastructure dialog consists of two steps:

      • The Codebase Info Menu
      • The Advanced Settings Menu

      Codebase Info Menu⚓︎

      In our example, we will use the Create from template strategy.

1. Select all the settings that define how the infrastructure will be added to the Git server:

Create infrastructure

• Git server - the pre-configured server where the component will be hosted. Select one from the drop-down list. Please refer to the Manage Git Servers page to learn how to create one.
        • Repository name - the relative path to the repository, such as epmd-edp/python-python-flask.
• Component name - the name of the infrastructure. Must be at least two characters long, using only lower-case letters, numbers, and inner dashes.
        • Description - brief and concise description that explains the purpose of the infrastructure.
• Empty project - check this box to create an infrastructure with an empty repository. The empty repository option is available only for the Create from template strategy.
      2. Specify the infrastructure language properties:

        • Infrastructure code language - defines the code language with its supported frameworks.
        • Language version/framework - defines the specific framework or language version of the infrastructure. The field depends on the selected code language.
• Build Tool - allows choosing the build tool to use. The set of tools changes in accordance with the selected code language.

      Advanced Settings Menu⚓︎

      In the Advanced Settings menu, specify the branch options and define the Jira settings:

Advanced settings

Follow the instructions below to fill in the fields of the Advanced Settings menu:

      • Default branch - the name of the branch where you want the development to be performed.

        Note

        The default branch cannot be deleted.

• Codebase versioning type - defines how the infrastructure tag will be changed once the new image version is built. There are two versioning types:

        • default: Using the default versioning type, in order to specify the version of the current artifacts, images, and tags in the Version Control System, a developer should navigate to the corresponding file and change the version manually.
• edp: Using the edp versioning type, a developer indicates the version number from which all the artifacts will be versioned and, as a result, automatically registered in the corresponding file (e.g. pom.xml). When selecting the edp versioning type, extra fields will appear; type the version number from which you want the artifacts to be versioned:

Edp versioning

        Note

        The Start Version From field should be filled out in compliance with the semantic versioning rules, e.g. 1.2.3 or 10.10.10. Please refer to the Semantic Versioning page for details.

• Specify the pattern to validate a commit message - the regular expression used to indicate the pattern that is followed on the project to validate a commit message in the code review pipeline. An example of the pattern: ^\[PROJECT_NAME-\d{4}\]:.*$ (a commit message such as [PROJECT_NAME-1234]: update deployment config matches it).

JIRA integration

• Integrate with Jira server - select this check box if you need to connect Jira tickets with commits and display a respective label in the Fix Version field.

      Note

To adjust the Jira integration functionality, first apply the necessary changes described on the Adjust Jira Integration and Adjust VCS Integration With Jira pages.

      • Jira Server - the integrated Jira server with related Jira tasks.
      • Specify the pattern to find a Jira ticket number in a commit message - based on this pattern, the value from KubeRocketCI will be displayed in Jira.

Mapping fields

• Mapping field name - the section where you specify the names of the additional Jira fields that should be filled in with attributes from KubeRocketCI:

        • Select the name of the field in a Jira ticket. The available fields are the following: Fix Version/s, Component/s and Labels.
        • Click the Add button to add the mapping field name.
        • Enter Jira pattern for the field name:

• For the Fix Version/s field, select the EDP_VERSION variable that represents an EDP upgrade version, as in 2.7.0-SNAPSHOT. Combine variables to make the value more informative. For example, the pattern EDP_VERSION-EDP_COMPONENT will be displayed as 2.7.0-SNAPSHOT-nexus-operator in Jira.
• For the Component/s field, select the EDP_COMPONENT variable that defines the name of the existing repository. For example, nexus-operator.
• For the Labels field, select the EDP_GITTAG variable that defines a tag assigned to the commit in GitHub. For example, build/2.7.0-SNAPSHOT.59.
        • Click the bin icon to remove the Jira field name.

      Click the Apply button to add the infrastructure to the Components list.

      Note

After adding the infrastructure, inspect the Manage Infrastructures page to learn how you can operate with infrastructure codebase types.
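For reference, applying the dialog produces a Codebase custom resource similar to the following (an illustrative sketch; all names and values are assumptions, and the exact field set depends on your platform version):

apiVersion: v2.edp.epam.com/v1
kind: Codebase
metadata:
  name: my-terraform-infra            # hypothetical component name
spec:
  type: infrastructure
  strategy: create
  gitServer: github
  gitUrlPath: /epmd-edp/my-terraform-infra
  defaultBranch: main
  lang: terraform
  buildTool: terraform
  emptyProject: false
  versioning:
    type: edp
    startFrom: 0.1.0-SNAPSHOT
  commitMessagePattern: '^\[PROJECT_NAME-\d{4}\]:.*$'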

diff --git a/user-guide/add-library/index.html b/user-guide/add-library/index.html

Add Library - EPAM Delivery Platform

      Add Library⚓︎

KubeRocketCI portal allows you to create a library, clone an existing repository with the library to your Version Control System (VCS), or use an external repository and import a library to the environment. When a library is created or cloned, the system automatically generates a corresponding repository within the integrated VCS. You can create a library in YAML or via the two-step menu in the dialog.

      To add a library, navigate to the Components section on the navigation bar and click + Create component:

Create new library

      Once clicked, the Create new component dialog will appear. Select Library and click Next:

Create new component menu

      Choose one of the strategies and click Create:

Select strategy

      In the Create new component menu, select the necessary configuration strategy. The choice will define the parameters you will need to specify:

• Create from template – creates a project from a template in accordance with a library language, a build tool, and a framework.
• Import project - allows using an existing VCS repository to integrate with KubeRocketCI. While importing the existing repository, select the Git server from the drop-down list and define the relative path to the repository, such as epmd-edp/python-python-flask.

      Note

To use the Import project strategy, make sure to adjust it as described on the Integrate GitLab/GitHub With Tekton page.

      • Clone project – clones the indicated repository into KubeRocketCI. While cloning the existing repository, it is required to fill in the Repository URL field and specify the Repository credentials field if needed:

Clone library

      Create Library in YAML ⚓︎

      Click Edit YAML in the upper-right corner of the Create Library dialog to open the YAML editor and create the library:

Edit YAML

To edit YAML in the minimal editor, turn on the Use minimal editor toggle in the upper-right corner of the Create Library dialog.

      To save the changes, select the Save & Apply button.

      Create Library via UI ⚓︎

The Create Library dialog consists of two steps:

      • The Codebase Info Menu
      • The Advanced Settings Menu

      The Codebase Info Menu⚓︎

      In our example, we will use the Create from template strategy:

Create library

1. Select all the settings that define how the library will be added to the Git server:

• Git server - the pre-configured server where the component will be hosted. Select one from the drop-down list. Please refer to the Manage Git Servers page to learn how to create one.
        • Repository name - the relative path to the repository, such as epmd-edp/python-python-flask.
• Component name - the name of the library. Must be at least two characters long, using only lower-case letters, numbers, and inner dashes.
        • Description - brief and concise description that explains the purpose of the library.
        • Empty project - check this box to create a library with an empty repository. The empty repository option is available only for the Create from template strategy.
      2. Specify the library language properties:

        • Library code language - defines the code language with its supported frameworks:

• Java – allows selecting the specific Java version available.
          • JavaScript - selecting JavaScript allows using the NPM tool.
• Python - selecting Python allows using Python v.3.8, FastAPI, and Flask.
• Groovy-pipeline - selecting Groovy-pipeline allows customizing the stages logic.
• Terraform - selecting Terraform allows using different Terraform versions via the Terraform version manager (tfenv); see the example after this list. KubeRocketCI supports all the actions available in Terraform, thus providing the ability to modify the virtual infrastructure and launch some checks with the help of linters. For details, please refer to the Use Terraform Library in KubeRocketCI page.
          • Rego - this option allows using Rego code language with an Open Policy Agent (OPA) Library. For details, please refer to the Use Open Policy Agent page.
          • Container - this option allows using the Kaniko tool for building the container images from a Dockerfile.
          • Helm - this option allows using the chart testing lint (Pipeline) for Helm charts or using Helm chart as a set of other Helm charts organized according to the example.
          • C# - selecting C# allows using .Net v.3.1 and .Net v.6.0.
          • Other - selecting Other allows extending the default code languages when creating a codebase with the Clone/Import strategy.

        Note

The Create strategy does not allow customizing the default code language set.

        • Language version/framework - defines the specific framework or language version of the library. The field depends on the selected code language.
• Build Tool - allows choosing the build tool to use. The set of tools changes in accordance with the selected code language.
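As mentioned in the Terraform item above, tfenv typically resolves the Terraform version from a .terraform-version file in the repository root (an illustrative example; the version value is an assumption):

# .terraform-version
1.5.7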

      Click the Proceed button to switch to the next menu.

      The Advanced Settings Menu⚓︎

      In the Advanced Settings menu, specify the branch options and define the Jira settings:

Advanced settings

      • Default branch - the name of the branch where you want the development to be performed.

        Note

        The default branch cannot be deleted.

• Codebase versioning type - defines how the library tag will be changed once the new image version is built. There are two versioning types:
        • default: Using the default versioning type, in order to specify the version of the current artifacts, images, and tags in the Version Control System, a developer should navigate to the corresponding file and change the version manually.
• edp: Using the edp versioning type, a developer indicates the version number from which all the artifacts will be versioned and, as a result, automatically registered in the corresponding file (e.g. pom.xml). When selecting the edp versioning type, extra fields will appear; type the version number from which you want the artifacts to be versioned:

EDP versioning

          Note

          The Start Version From field should be filled out in compliance with the semantic versioning rules, e.g. 1.2.3 or 10.10.10. Please refer to the Semantic Versioning page for details.

• Specify the pattern to validate a commit message - the regular expression used to indicate the pattern that is followed on the project to validate a commit message in the code review pipeline. An example of the pattern: ^\[PROJECT_NAME-\d{4}\]:.*$.

Integrate with Jira server

• Integrate with Jira server - select this check box if you need to connect Jira tickets with commits and display a respective label in the Fix Version field.

      Note

To adjust the Jira integration functionality, first apply the necessary changes described on the Adjust Jira Integration and Adjust VCS Integration With Jira pages.

      • Jira Server - the integrated Jira server with related Jira tasks.
      • Specify the pattern to find a Jira ticket number in a commit message - based on this pattern, the value from KubeRocketCI will be displayed in Jira.

Mapping fields

• Mapping field name - the section where you specify the names of the additional Jira fields that should be filled in with attributes from KubeRocketCI:

        • Select the name of the field in a Jira ticket. The available fields are the following: Fix Version/s, Component/s and Labels.
        • Click the Add button to add the mapping field name.
        • Enter Jira pattern for the field name:

• For the Fix Version/s field, select the EDP_VERSION variable that represents an EDP upgrade version, as in 2.7.0-SNAPSHOT. Combine variables to make the value more informative. For example, the pattern EDP_VERSION-EDP_COMPONENT will be displayed as 2.7.0-SNAPSHOT-nexus-operator in Jira.
• For the Component/s field, select the EDP_COMPONENT variable that defines the name of the existing repository. For example, nexus-operator.
• For the Labels field, select the EDP_GITTAG variable that defines a tag assigned to the commit in GitHub. For example, build/2.7.0-SNAPSHOT.59.
        • Click the bin icon to remove the Jira field name.

      Click the Apply button to add the library to the Components list.

After adding the library, inspect the Library Overview page to learn how you can operate libraries.

diff --git a/user-guide/add-marketplace/index.html b/user-guide/add-marketplace/index.html

Add Component via Marketplace - EPAM Delivery Platform

      Add Component via Marketplace⚓︎

      With the built-in Marketplace, users can easily create a new application by clicking several buttons. This page contains detailed guidelines on how to create a new component with the help of the Marketplace feature.

      Add Component⚓︎

To create a component from a template, follow the instructions below:

      1. Navigate to the Marketplace section on the navigation bar to see the Marketplace overview page.

      2. Select the component, open its details window and click Create from template:

Create from template

      3. Fill in the required fields and click Apply:

Creating from template window

4. As a result, a new component will appear in the Components section:

Creating from template window

diff --git a/user-guide/add-quality-gate/index.html b/user-guide/add-quality-gate/index.html

Add Quality Gate - EPAM Delivery Platform

      Add Quality Gate⚓︎

This section describes how to use a quality gate in KubeRocketCI and how to customize the quality gate for the CD pipeline with the selected build version of the promoted application between stages.

      Apply New Quality Gate to Pipelines⚓︎

A quality gate pipeline is a usual Tekton pipeline but with a specific label: app.edp.epam.com/pipelinetype: deploy. To add and apply the quality gate to your pipelines, follow the steps below:

      1. To use the Tekton pipeline as a quality gate pipeline, add this label to the pipelines:

      metadata:
         labels:
           app.edp.epam.com/pipelinetype: deploy
       
      2. Insert the value that is the quality gate name displayed in the quality gate drop-down list of the CD pipeline menu:
metadata:
  name: <quality-gate-name>
...
        taskRef:
          kind: Task
          name: promote-images
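
Putting steps 1 and 2 together, a minimal sketch of a quality gate pipeline could look as follows; the pipeline name my-quality-gate and the run-autotests task are placeholders for illustration, not platform defaults:

apiVersion: tekton.dev/v1
kind: Pipeline
metadata:
  name: my-quality-gate              # shown in the quality gate drop-down list
  labels:
    app.edp.epam.com/pipelinetype: deploy
spec:
  tasks:
    - name: run-autotests            # placeholder task that executes the checks
      taskRef:
        kind: Task
        name: run-autotests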

      Run Quality Gate⚓︎

Before running the quality gate, make sure that the created CD pipeline is deployed to the environment and that the application is successfully deployed and ready for verification. To run the quality gate, follow the steps below:

1. Check the CD pipeline status. To do this, open the created CD pipeline, select the Image stream version, click the DEPLOY button, and wait until the Applications, Health and Sync statuses become green. This means the application is successfully deployed and ready to run the quality gate.

  CD pipeline stage overview

2. Select the quality gate name from the Quality gates drop-down list and click the RUN button.
  The execution process starts in the Pipelines menu:

  Quality gate pipeline status

      Add Stage for Quality Gate⚓︎

For a better understanding of this section, please read the documentation on how to add a new stage for a quality gate.
The scheme below illustrates two approaches to adding quality gates:

Quality gate promotion scheme
Types of adding a quality gate

• The first approach adds a specific quality gate to a specific pipeline stage.
• The second approach is optional: activate the Promote in pipelines option while creating a CD pipeline, so that the quality gates are passed in a defined sequence.

As a result, after the quality gate is successfully passed, the image is promoted to the next stage.


      Manage Applications⚓︎

      This section describes the subsequent possible actions that can be performed with the newly added or existing applications.

      Check and Remove Application⚓︎

      As soon as the application is successfully provisioned, the following will be created:

• An Application Codebase type will appear in the Codebase list of the Components section (a quick command-line check is sketched after this list).
      • With the Create strategy, a new project will be generated on GitHub or another integrated VCS. When Clone is chosen, the repository will be forked from the original and copied to the KubeRocketCI-integrated repository. If Import is selected, the platform connects to the chosen repository.
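
As a quick check of the first point above, you can list the codebase custom resources with kubectl. This is a sketch that assumes the platform runs in the edp namespace (as in the cluster examples elsewhere in this guide) and that the Codebase CRD is registered with the plural name codebases:

kubectl get codebases -n edp

The newly added application should appear in the output together with its type and status.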

      The added application will be listed in the Applications list allowing you to do the following:

Applications menu

• Application status - displays the application status. It can be red or green, depending on whether the KubeRocketCI portal managed to connect to the Git server with the specified credentials.
      • Application name (clickable) - displays the application name set during the application creation.
      • Open documentation - opens the application related documentation page.
• Enable filtering - enables filtering by application name and by the namespace where this custom resource is located.
      • Create new application - displays the Create new component menu.
      • Edit application - edit the application by selecting the options icon next to its name in the applications list, and then selecting Edit. For details see the Edit Existing Application section.
• Delete application - remove the application by clicking the vertical ellipsis button and then selecting Delete.

        Note

        The application that is used in a CD pipeline cannot be removed.

      There are also options to sort the applications:

      • Sort the existing applications in a table by clicking the sorting icons in the table header. Sort the applications alphabetically by their name, language, build tool, framework, and CI tool. You can also sort the applications by their status: Created, Failed, or In progress.
      • Select a number of applications displayed per page (15, 25 or 50 rows) and navigate between pages if the number of applications exceeds the capacity of a single page:

  Applications pages

      Edit Existing Application⚓︎

      KubeRocketCI Portal provides the ability to enable, disable or edit the Jira Integration functionality for applications.

      1. To edit an application directly from the Applications overview page or when viewing the application data:

        • Select Edit in the options icon menu:

  Edit application on the Applications overview page

  Edit application when viewing the application data

        • The Edit Application dialog opens.
      2. To enable Jira integration, in the Edit Application dialog do the following:

  Edit application

        a. Mark the Integrate with Jira server check box and fill in the necessary fields. Please see steps d-h of the Add Application page.

        b. Select the Apply button to apply the changes.

      3. To disable Jira integration, in the Edit Application dialog do the following:

        a. Clear the Integrate with Jira server check box.

        b. Select the Apply button to apply the changes.

      4. To create, edit and delete application branches, please refer to the Manage Branches page.


      Manage Autotests⚓︎

      This section describes the subsequent possible actions that can be performed with the newly added or existing autotests.

      Check and Remove Autotest⚓︎

      As soon as the autotest is successfully provisioned, the following will be created:

      • An Autotest Codebase type will appear in the Codebase list of the Components section.
      • With the Create strategy, a new project will be generated on GitHub or another integrated VCS. When Clone is chosen, the repository will be forked from the original and copied to the KubeRocketCI-integrated repository. If Import is selected, the platform connects to the chosen repository.

      Info

      To navigate quickly to Tekton, Version Control System, SonarQube, Nexus, and other resources, click the Overview section on the navigation bar and hit the necessary link.

      The added autotest will be listed in the Autotests list allowing you to do the following:

Autotests page

• Autotest status - displays the autotest status. It can be red or green, depending on whether the KubeRocketCI portal managed to connect to the Git server with the specified credentials.
      • Autotest name (clickable) - displays the autotest name set during the autotest creation.
      • Open documentation - opens the autotest related documentation page.
• Enable filtering - enables filtering by autotest name and by the namespace where this custom resource is located.
      • Create new autotest - displays the Create new component menu.
      • Edit autotest - edit the autotest by selecting the options icon next to its name in the autotests list, and then selecting Edit. For details see the Edit Existing Autotest section.
• Delete autotest - remove the autotest by clicking the vertical ellipsis button and then selecting Delete.

        Note

        The autotest that is used in a CD pipeline cannot be removed.

      There are also options to sort the autotests:

      • Sort the existing autotests in a table by clicking the sorting icons in the table header. Sort the autotests alphabetically by their name, language, build tool, framework, and CI tool. You can also sort the autotests by their status: Created, Failed, or In progress.
      • Select a number of autotests displayed per page (15, 25 or 50 rows) and navigate between pages if the number of autotests exceeds the capacity of a single page.

      Edit Existing Autotest⚓︎

      KubeRocketCI portal provides the ability to enable, disable or edit the Jira Integration functionality for autotests.

      1. To edit an autotest directly from the Autotests overview page or when viewing the autotest data:

        • Select Edit in the options icon menu:

    Edit autotest on the autotests overview page

    Edit autotest when viewing the autotest data

      2. To enable Jira integration, on the Edit Autotest page do the following:

  Edit autotest

        a. Mark the Integrate with Jira server check box and fill in the necessary fields. Please see steps d-h on the Add Autotests page.

        b. Click the Apply button to apply the changes.

        Note

        To adjust the Jira integration functionality, first apply the necessary changes described on the Adjust Jira Integration and Adjust VCS Integration With Jira pages.

      3. To disable Jira integration, in the Edit Autotest dialog do the following:

        • Clear the Integrate with Jira server check box.
        • Click the Apply button to apply the changes.
4. To create, edit and delete autotest branches, please refer to the Manage Branches page.

      Add Autotest as a Quality Gate⚓︎

      In order to add an autotest as a quality gate to a newly added CD pipeline, do the following:

      1. Create a CD pipeline with the necessary parameters. Please refer to the Add CD Pipeline section for the details.

2. In the Stages menu, select the Autotest quality gate type. This means that the promotion process must be confirmed by the successful passing of the autotests.

      3. In the additional fields, select the previously created autotest name and specify its branch.

4. After filling in all the necessary fields, click the Create button to start provisioning the pipeline. After the CD pipeline is added, a new namespace containing the stage name will be created in Kubernetes (in OpenShift, a new project) with the following name pattern: [cluster name]-[cd pipeline name]-[stage name].
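
For example, once provisioning completes, you can confirm that the stage namespace exists. The names below are hypothetical placeholders following the pattern above:

kubectl get namespace my-cluster-mypipeline-sit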

      Configure Autotest Launch at Specific Stage⚓︎

To configure the added autotest to launch at a specific stage with the necessary parameters, do the following:

      1. Add the necessary stage to the CD pipeline. Please refer to the Add CD Pipeline documentation for the details.

2. Navigate to the run.json file and add the stage name and the specific parameters (a sketch is given below).
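
The exact schema of run.json is project-specific. Assuming the file maps stage names to launch commands (an assumption for illustration, not a platform guarantee), a hypothetical fragment could look like this:

{
  "sit": "mvn test -Dsuite=sit",
  "qa": "mvn test -Dsuite=qa"
}

Here sit and qa are stage names, and the values are the commands executed at those stages.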

      Launch Autotest Locally⚓︎

There is an ability to run the autotests locally using an IDE (Integrated Development Environment) such as IntelliJ IDEA, NetBeans, etc. To launch the autotest project for local verification, perform the following steps:

      1. Clone the project to the local machine.

2. Open the project in the IDE and find the run.json file to copy the necessary command value.

      3. Paste the copied command value into the Command line field and run it with the necessary values and namespace.

      4. As a result, all the launched tests will be executed.


      Build Pipeline⚓︎

      This section provides details on the Build pipeline of the EDP CI/CD pipeline framework. Explore below the pipeline purpose, stages and possible actions to perform.

      Build Pipeline Purpose⚓︎

The Build pipeline serves the following purposes:

• Check out, test, tag and build an image from the mainstream branch after a patch set is submitted, in order to inspect whether the code integrated into the mainstream fits all quality gates and can be built and tested;
      • Be triggered if any new patch set is submitted;
      • Tag a specific commit in Gerrit in case the build is successful;
• Build a Docker image with an application that can afterwards be deployed using the Tekton/Argo CD Deploy pipeline.

      Find below the functional diagram of the Build pipeline with the default stages:

      flowchart TD
          build --> get-nexus-repository-url
          compile --> test
          start([fa:fa-circle]) --> fetch-repository
    ...
    sonar --> build
    test --> sonar
    update-build-number --> sast
    update-cbis --> stop([fa:fa-circle])

      Build Pipeline for Application and Library⚓︎

      The Build pipeline is triggered automatically after the Code Review pipeline is completed and the changes are submitted.

      To review the Build pipeline, take the following steps:

1. Open the Tekton Dashboard via the Overview page of the EDP Portal.

      2. Review stages for the application and library codebases:

        • Init - initialization of the Code Review pipeline inputs;
        • Checkout - checkout of the application code;
  • Get-version - get the version from the pom.xml file and add the build number;
        • Compile - code compilation;
        • Tests - tests execution;
        • Sonar - Sonar launch that checks the whole code;
        • Build - artifact building and adding to Nexus;
  • Build-image - Docker image building and adding to the Docker registry. The Build pipeline for the library has the same stages as the application, except for the Build-image stage, i.e. the Docker image is not built.
        • Push - artifact docker image pushing to Nexus and Docker Registry;
  • Git-tag - adding the corresponding Git tag to the current commit to relate it with the image, artifact, and build version.

      After the Build pipeline runs all the stages successfully, the corresponding tag numbers will be created in Kubernetes/OpenShift and Nexus.

      Check the Tag in Kubernetes/OpenShift and Nexus⚓︎

1. After the Build pipeline is completed, check that the tag name matches the commit revision. Simply navigate to Gerrit → Projects → List → select the project → Tags.

        Note

  For the Import strategy, navigate to the repository from which a codebase is imported → Tags. This applies to both GitHub and GitLab.

2. Open the Kubernetes/OpenShift Overview page, click the link to Nexus, and check the build of the new version.

      3. Switch to Kubernetes → CodebaseImageStream (or OpenShift → Builds → Images) → click the image stream that will be used for deployment.

      4. Check the corresponding tag.

      Configure and Start Pipeline Manually⚓︎

      The Build pipeline can be started manually. To set the necessary stages and trigger the pipeline manually, take the following steps:

      1. Open the Build pipeline for the created library.

2. Click the Build with parameters option in the left-side menu. Modify the stages by removing entire objects from the array, e.g. {"name": "tests"}, where name is a key and tests is the name of a stage to be executed (see the sketch after these steps).

      3. Open Tekton Dashboard or Component details page and check the successful execution of all stages.
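
To illustrate step 2, assume a hypothetical stages parameter that lists the pipeline stages as an array of objects; removing the tests stage would change it as follows (the surrounding structure is an assumption for illustration):

Before: [{"name": "compile"}, {"name": "tests"}, {"name": "build"}]
After:  [{"name": "compile"}, {"name": "build"}]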


      CD Pipeline Details⚓︎

CD Pipeline (Continuous Delivery Pipeline) - an EDP business entity that describes the whole delivery process of the selected application set via the respective stages. The main idea of the CD pipeline is to promote the application build version between the stages by applying sequential verification (i.e. the second stage becomes available only if verification on the first stage is successfully completed). The CD pipeline can also include an essential set of applications with their specific stages.

In other words, the CD pipeline allows the selected image stream (Docker container in Kubernetes terms) to pass a set of stages for the verification process (SIT - system integration testing with an automatic quality gate type, QA - quality assurance, UAT - user acceptance testing with manual testing).

      Note

      It is possible to change the image stream for the application in the CD pipeline. Please refer to the Edit CD Pipeline section for the details.

A CI/CD pipeline helps to automate steps in a software delivery process, such as code build initialization, automated test runs, and deployment to a staging or production environment. Automated pipelines remove manual errors, provide a standardized development feedback cycle, and enable fast product iterations. To get more information on the CI pipeline, please refer to the CI Pipeline Details chapter.

The codebase stream is used as a holder for the output of the stage, i.e. after the Docker container (or an image stream in OpenShift terms) passes the stage verification, it is placed into the new codebase stream. Every codebase has a branch, and each branch has its own codebase stream - a Docker container that is the output of the build for the corresponding branch.

      Note

For more information on the main terms used in EPAM Delivery Platform, please refer to the EDP Glossary.

EDP CD pipeline

      Explore the details of the CD pipeline below.

      Deploy Pipeline⚓︎

      The Deploy pipeline is used by default on any stage of the Continuous Delivery pipeline. It addresses the following concerns:

      • Deploying the application(s) to the main STAGE (SIT, QA, UAT) environment in order to run autotests and to promote image build versions to the next environments afterwards.
      • Deploying the application(s) to a custom STAGE environment in order to run autotests and check manually that everything is ok with the application.
• Deploying the latest, a stable, or a particular numeric version of an image build that exists in the Docker registry.
      • Promoting the image build versions from the main STAGE (SIT, QA, UAT) environment.
      • Auto deploying the application(s) version from the passed payload (using the CODEBASE_VERSION job parameter).

      Find below the functional diagram of the Deploy pipeline with the default stages:

      Note

      The input for a CD pipeline depends on the Trigger Type for a deploy stage and can be either Manual or Auto.

Deploy pipeline stages


      CI Pipeline Details⚓︎

      CI Pipeline (Continuous Integration Pipeline) - an EDP business entity that describes the integration of changes made to a codebase into a single project. The main idea of the CI pipeline is to review the changes in the code submitted through a Version Control System (VCS) and build a new codebase version so that it can be transmitted to the Continuous Delivery Pipeline for the rest of the delivery process.

      There are three codebase types in EPAM Delivery Platform:

1. Applications - a codebase that is developed in the Version Control System and has the full lifecycle, starting from the Code Review stage up to its deployment to the environment;
2. Libraries - this codebase is similar to the Application type, but it is not deployed; instead, it is stored in the Artifactory. A library can be connected to other applications/libraries;
3. Autotests - a codebase that inspects the code and can be used as a quality gate for a CD pipeline stage. An autotest only has the Code Review pipeline and is launched for stage verification.

      Note

For more information on the above-mentioned codebase types, please refer to the Add Application, Add Library, Add Autotests and Autotest as Quality Gate pages.

EDP CI pipeline


      EDP CI/CD Overview⚓︎

      This chapter provides information on CI/CD basic definitions and flow, as well as its components and process.

      CI/CD Basic Definitions⚓︎

      The Continuous Integration part means the following:

• all components of the application development are in the same place and perform the same processes for running;
• the results are published in one place and replicated into EPAM GitLab or another VCS (version control system);
• the repository also includes a storage tool (e.g. Nexus) for all binary artifacts that are produced by the Tekton CI server after changes are submitted from the Code Review tool into the VCS.

      The Code Review and Build pipelines are used before the code is delivered. An important part of both of them is the integration tests that are launched during the testing stage.

Many applications used by the project (SonarQube, Gerrit, etc.) need databases to operate.

Continuous Delivery is an approach that allows producing an application in short cycles so that it can be reliably released at any point in time. This part is tightly bound to the usage of the Code Review, Build, and Deploy pipelines.

The Deploy pipelines deploy the application configurations and their specific versions, launch automated tests, and control quality gates for the specified environment. As a result of a successfully completed process, the specific versions of images are promoted to the next environment. All environments are sequential and promote the application build versions one by one. The logic of each stage is described as Tekton pipeline code and stored in the VCS.

During CI/CD, several continuous processes run in the repository; find below the list of possible actions:

• Review the code with the help of the Gerrit tool;
• Run static analysis using SonarQube to control the quality of the source code and keep historical data, which helps to understand the trends and effectiveness of particular teams and members;
• Analyze application source code using SAST, byte code, and binaries for coding/design conditions that are indicative of security vulnerabilities;
• Build the code with Tekton CI and run automated tests that are written to make sure the applied changes will not break any functionality.

      Note

      For the details on autotests, please refer to the Autotest, Add Autotest, and Autotest as Quality Gate pages.

      The release process is divided into cycles and provides regular delivery of completed pieces of functionality while continuing the development and integration of new functionality into the product mainline.

      Explore the main flow that is displayed on the diagram below:

EDP CI/CD pipeline


      Manage Clusters⚓︎

      This section describes the subsequent possible actions that can be performed with the newly added or existing clusters.

In a nutshell, a cluster in the EDP Portal is a Kubernetes secret that stores the credentials and endpoint needed to connect to another cluster. Adding new clusters allows users to deploy applications in several clusters, thus improving the flexibility of your infrastructure. A sketch of such a secret is shown below.
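
As an illustrative sketch only - the exact key names and layout the portal expects may differ - such a secret could hold a kubeconfig-style payload with the endpoint and credentials:

apiVersion: v1
kind: Secret
metadata:
  name: my-remote-cluster   # hypothetical cluster name
  namespace: edp
type: Opaque
stringData:
  config: |
    # assumed kubeconfig-style content
    apiVersion: v1
    kind: Config
    clusters:
      - name: my-remote-cluster
        cluster:
          server: https://my-remote-cluster.example.com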

      The added cluster will be listed in the clusters list allowing you to do the following:

Clusters list

• Cluster status - displays the cluster status. It can be red or green, depending on whether the KubeRocketCI portal managed to connect to the cluster host with the specified credentials.
      • Open documentation - opens the cluster related documentation page.
      • Add a new cluster - displays the cluster creation form.
      • Cluster properties - shows the specified cluster properties.
• Delete cluster - remove the cluster by clicking the recycle bin icon.

      View Authentication Data⚓︎

      To view authentication data that is used to log in to the cluster, run the kubectl describe command:

      kubectl describe secret cluster_name -n edp
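
If you need the stored credentials themselves rather than the secret's metadata, you can additionally decode a specific key; the key name config here is an assumption about the secret layout:

kubectl get secret cluster_name -n edp -o jsonpath='{.data.config}' | base64 -d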
       

      Delete Cluster⚓︎

To delete a cluster, use the kubectl delete command as follows:

      kubectl delete secret cluster_name -n edp

      Code Review Pipeline⚓︎

      This section provides details on the Code Review pipeline of the EDP CI/CD framework. Explore below the pipeline purpose, stages and possible actions to perform.

      Code Review Pipeline Purpose⚓︎

The Code Review pipeline serves the following purposes:

      • Check out and test a particular developer's change (Patch Set) in order to inspect whether the code fits all the quality gates and can be built and tested;
      • Be triggered if any new Patch Set appears in GitHub/GitLab/Gerrit;
• Send feedback about the build process in Tekton to the review card in Gerrit;
      • Send feedback about Sonar violations that have been found during the Sonar stage.

      Find below the functional diagram of the Code Review pipeline with the default stages:

      flowchart TD
          build --> dockerbuild-verify
          compile --> test
          dockerbuild-verify --> stop([fa:fa-circle])
    ...
    start([fa:fa-circle]) --> report-pipeline-start-to-gitlab
    report-pipeline-start-to-gitlab --> fetch-repository
    sonar --> build
    test --> sonar

      Components Overview⚓︎

In this section, we will introduce you to the different types of codebases and the strategies for onboarding codebases onto KubeRocketCI.

      Component and Codebase⚓︎

      From a business perspective, Components represent the functional building blocks of software projects. They define the purpose and functionality of different parts of a business application, such as core applications, libraries, automated tests, and infrastructure settings. Components are about what software does and how it aligns with business goals.

      From a technical implementation perspective, Codebases are the Kubernetes custom resources that manage the technical aspects of these Components. They serve as the bridge between the business logic represented by Components and the underlying Git repositories. Codebases are responsible for the technical implementation, ensuring that the Components are efficiently stored, versioned, and synchronized with the version control system. They represent the state of Components from a technical standpoint.

      Components⚓︎

      Components are the building blocks of software projects. They come in different types, such as Applications, Libraries, Autotests, and Infrastructure. Each component type serves a specific purpose in the development process. Applications are the deployable unit of projects, libraries contain reusable code, autotests facilitate automated testing, and infrastructure defines a project's infrastructure settings.

      Codebases⚓︎

Codebases are Kubernetes custom resources (CR) that represent the state of the components. They are a crucial link between a component's state and the underlying Git repository. In essence, each codebase corresponds to a specific component and reflects its current state within a single Git repository. This one-to-one mapping ensures that the component's state is efficiently managed and versioned.

      Types⚓︎

      KubeRocketCI accommodates a variety of codebase types, each serving a specific purpose in the development process. The codebase types available in KubeRocketCI are:

• Application: the codebase that contains the source code and manifests of an application that can be deployed to Kubernetes clusters. One can use different languages, frameworks, and build tools to develop applications.
      • Library: the codebase that contains reusable code components that can be shared across multiple projects. They are an essential resource for efficient and consistent development.
      • Autotest: the codebase that facilitates the implementation of automated tests, helping ensure the quality, performance and reliability of applications.
      • Infrastructure: Infrastructure codebases are used to define and manage the underlying infrastructure of projects using the Infrastructure as Code approach, ensuring consistency and reproducibility.

      Onboarding Strategies⚓︎

      The platform supports the following strategies to onboard codebases on the platform:

• Create from template - This strategy allows you to create a new codebase from a predefined template that matches the application's language, build tool, and framework. One can choose from a variety of templates that cover different technologies and use cases. This strategy is recommended for projects that start developing their applications from scratch or want to follow the best practices of KubeRocketCI.
• Import project - This strategy allows you to import an existing codebase from a Git server that is integrated with KubeRocketCI. One can select the Git server and the repository to import, and KubeRocketCI will replicate it to the platform and configure it. This strategy is suitable for projects that already have a codebase on a Git server and want to leverage the benefits of KubeRocketCI.
• Clone project - This strategy allows you to clone an existing codebase from any Git repository that is accessible via HTTPS. One can provide the repository URL, and KubeRocketCI will clone it to the platform and configure it. This strategy is useful for projects that want to copy a codebase from an external source and customize it for their needs.

      Codebase Operator⚓︎

The codebase-operator is responsible for creating and managing the codebase custom resource on the KubeRocketCI platform. The codebase CR defines the metadata and configuration of the codebase, such as the name, description, type, repository URL, branch, path, and CD tool. The codebase-operator watches for changes in the codebase CR and synchronizes them with the corresponding Git repository and KubeRocketCI components. Learn more about the codebase-operator and the custom resource (CR) API.
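
For illustration only, below is a minimal sketch of a Codebase custom resource, assuming the v2.edp.epam.com/v1 API group served by the codebase-operator; the component name, repository path, and framework value are hypothetical, and the CR API reference remains the authoritative source for the full schema:

    apiVersion: v2.edp.epam.com/v1
    kind: Codebase
    metadata:
      name: my-go-app                # hypothetical component name
      namespace: edp
    spec:
      type: application              # application | library | autotest | infrastructure
      strategy: create               # create | clone | import
      gitServer: github
      gitUrlPath: /myorg/my-go-app   # repository path on the Git server
      defaultBranch: main
      lang: go
      buildTool: go
      framework: gin                 # hypothetical framework value
      emptyProject: false
      versioning:
        type: default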

      Configuration Overview⚓︎

The Configuration tab of the KubeRocketCI portal is designed to integrate KubeRocketCI with third-party components. This page gives a brief overview of all the sections presented in the Configuration tab.

      Configuration Features⚓︎

Here is the list of all the sections provided in the Configuration tab. Familiarize yourself with the purpose of each section:

• Links - Configure links for quick access to required tools that will be displayed on the Overview page or in specific resource details, such as application or stage details. This section is also used to configure widgets, such as SonarQube and DependencyTrack.
• Nexus - Integrate Nexus to store and manage your application artifacts, facilitating a Continuous Integration flow within the platform.
• Registry - Integrate the platform with a container registry to store container artifacts.
• Clusters - Integrate the platform with external clusters to enable remote cluster deployment.
• GitOps - Onboard a specific repository used for the GitOps approach.
• Argo CD - Integrate the platform with Argo CD to enable the GitOps approach.
• DefectDojo - Connect the platform with DefectDojo to manage and track security defects in applications.
• DependencyTrack - Connect the platform with the DependencyTrack tool for monitoring and managing vulnerabilities within third-party components.
• SonarQube - Integrate SonarQube to enable static code analysis.
• Git Server - Connect the platform to Version Control Systems, such as GitHub, GitLab, or Gerrit, for source code management.
• Jira - Integrate Jira to track and deliver the status of your projects on a dedicated dashboard.
• SSO - Integrate the platform with an identity provider to enable Single Sign-On.

      Manage Git Servers⚓︎

The Git Server is responsible for integration with a Version Control System, whether it is GitHub, GitLab, or Gerrit.

      The Git Server is set via the global.gitProviders parameter of the values.yaml file.
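
For reference, a minimal values.yaml fragment might look as follows; the provider list shown here is an assumption, so adjust it to the Git providers actually enabled in your installation:

    global:
      gitProviders:
        - github    # possible values include github, gitlab, gerrit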

      To view the current Git Server, you can open KubeRocketCI -> Configuration -> Git Servers and inspect the following properties:

Git Server menu

      • Git Server status and name - displays the Git Server status, which depends on the Git Server integration status (Success/Failed).
      • Git Server properties - displays the Git Server type, its host address, username, SSH/HTTPS port, public and private SSH keys.
      • Open documentation - opens the "Manage Git Servers" documentation page.
      • Undo/Save changes - these buttons apply or revert changes made to the Git Server.
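
Behind this menu, each entry is backed by a GitServer custom resource. Below is a minimal sketch, assuming the v2.edp.epam.com/v1 API group and a GitHub integration; the host, user, ports, and secret name are illustrative:

    apiVersion: v2.edp.epam.com/v1
    kind: GitServer
    metadata:
      name: github
      namespace: edp
    spec:
      gitProvider: github
      gitHost: github.com
      gitUser: git
      sshPort: 22
      httpsPort: 443
      nameSshKeySecret: ci-github    # secret that stores the SSH keys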

      View Authentication Data⚓︎

To view the authentication data that is used to connect to the Git server, use the kubectl describe command as follows:

      kubectl describe GitServer git_server_name -n edp
       

      Delete Git Server⚓︎

      To remove a Git Server from the Git Servers list, utilize the kubectl delete command as follows:

      kubectl delete GitServer git_server_name -n edp

      Manage GitOps⚓︎

This page is dedicated to the GitOps section of the Configuration tab: it describes the process of establishing the GitOps repository and outlines the benefits it brings to platform users. GitOps, short for "Git Operations", is a modern approach to managing and automating infrastructure and application deployments. In GitOps, the desired state of your environment is declared and stored in a Git repository. With GitOps, you can ensure that your infrastructure and applications are always in sync with your intended configurations and readily adapt to changing requirements.

      Overview⚓︎

The purpose of the GitOps section is to let users customize the state of their environments using the GitOps approach: the entire deployment configuration is stored in a Git repository, providing version control for changes, consistent collaboration, and automated deployments. Enforcing GitOps allows you to declaratively define and automate your configurations, ensuring consistency within your team.

      Add GitOps Repository⚓︎

The GitOps repository is added in two steps:

      1. Navigate to KubeRocketCI -> Components -> GitOps. Fill in the required fields (in case VCS supports nesting) and click Save:

  Required fields

      2. Check the GitOps repository connected to the platform:

  System Codebase

In addition, a system Codebase named the same as the GitOps repository will be added to the Codebase list of the Components section:

GitOps Codebase

      Note

      The platform allows only one GitOps repository at a time.

      GitOps Usage⚓︎

      Once the GitOps repository is added to the platform, you can set custom parameters for the deployed Helm Chart. To redefine the parameters, follow the steps below:

1. In the GitOps repository, create the values.yaml file according to the <pipeline-name>/<stage-name>/<application-name>-values.yaml pattern (see the example after these steps).

      2. In the created values.yaml file, enter the parameters with their custom values.

      3. Navigate to the Environments section. Open the created environment, open its stage and deploy it with the Values override checkbox selected as it is shown below:

  GitOps Codebase
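
For instance, assuming a hypothetical pipeline mypipeline with a dev stage and an application myapp, the override file would live at mypipeline/dev/myapp-values.yaml in the GitOps repository and could redefine chart parameters like this (the parameters themselves depend on your application's Helm chart):

    # mypipeline/dev/myapp-values.yaml (hypothetical names)
    replicaCount: 2
    resources:
      limits:
        memory: 256Mi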

      Delete GitOps Repository⚓︎

      In case you need to delete the GitOps repository, do the following:

      1. Delete the GitOps repository in the Git provider.

      2. Delete the Codebase custom resource using the kubectl delete command:

        kubectl delete Codebase edp-gitops -n edp

      Overview⚓︎

      The KubeRocketCI (a.k.a. EPAM Delivery Platform) portal user guide is intended for developers and provides details on working with the KubeRocketCI portal, different codebase types, and the KubeRocketCI CI/CD flow.

      KubeRocketCI Portal⚓︎

The KubeRocketCI portal is a central management tool in the KubeRocketCI ecosystem that provides the ability to define pipelines, project resources, and new technologies in a simple way. The KubeRocketCI portal enables you to manage business entities:

      • Create such codebase types as Applications, Libraries, Autotests and Infrastructures;
      • Create/Update CD Pipelines;
      • Add external Git servers and Clusters.

      Below is the Overview page of the KubeRocketCI portal:

Overview page

• Application widgets – show information on codebases created in the default and allowed namespaces, reflecting the overall number of entities and their statuses.
      • Top bar panel – contains documentation link, notifications, KubeRocketCI portal settings, and cluster settings, such as default and allowed namespaces.
• Quick links – displays links to the major configured tools.
      • Pipeline runs – displays all the pipeline runs initiated in both the default and allowed namespaces.

The KubeRocketCI portal is a complete tool allowing you to manage and control the codebases (applications, autotests, libraries, and infrastructures) added to the environment, as well as to create a CD pipeline.

Inspect the main features available in the KubeRocketCI portal by following the corresponding links.

      Manage Infrastructures⚓︎

      This section describes the subsequent possible actions that can be performed with the newly added or existing infrastructures.

Check and Remove Infrastructure⚓︎

      As soon as the infrastructure is successfully provisioned, the following will be created:

      • An Infrastructure Codebase type will appear in the Codebase list of the Components section.
      • With the Create strategy, a new project will be generated on GitHub or another integrated VCS. When Clone is chosen, the repository will be forked from the original and copied to the KubeRocketCI-integrated repository. If Import is selected, the platform connects to the chosen repository.

      The added infrastructure will be listed in the infrastructure list allowing you to do the following:

Components menu

• Infrastructure status - displays the infrastructure status. Can be red or green depending on whether the KubeRocketCI portal managed to connect to the Git Server with the specified credentials.
• Infrastructure name (clickable) - displays the infrastructure name set during the infrastructure creation.
• Open documentation - opens the infrastructure-related documentation page.
• Enable filtering - enables filtering by infrastructure name and the namespace this custom resource is located in.
      • Create new infrastructure - displays the Create new component menu.
      • Edit infrastructure - edit the infrastructure by selecting the options icon next to its name in the infrastructures list, and then selecting Edit. For details see the Edit Existing Infrastructure section.
      • Delete infrastructure - remove infrastructure by clicking the vertical ellipsis button and then selecting Delete.

      There are also options to sort the infrastructures:

      • Sort the existing infrastructures in a table by clicking the sorting icons in the table header. Sort the infrastructures alphabetically by their name, language, build tool, framework, and CI tool. You can also sort the infrastructures by their status: Created, Failed, or In progress.
      • Select a number of infrastructures displayed per page (15, 25 or 50 rows) and navigate between pages if the number of items exceeds the capacity of a single page.

      Edit Existing Infrastructure⚓︎

      KubeRocketCI portal provides the ability to enable, disable or edit the Jira Integration functionality for infrastructures.

      1. To edit an infrastructure directly from the infrastructures overview page or when viewing the infrastructure data:

        • Select Edit in the options icon menu:

  Edit infrastructure on the Infrastructures overview page

  Edit infrastructure when viewing the infrastructure data

        • The Edit Infrastructure dialog opens.
      2. To enable Jira integration, in the Edit Infrastructure dialog do the following:

  Edit infrastructure

        a. Mark the Integrate with Jira server check box and fill in the necessary fields. Please see steps d-h on the Add Infrastructure page.

        b. Select the Apply button to apply the changes.

      3. To disable Jira integration, in the Edit Infrastructure dialog do the following:

        a. Clear the Integrate with Jira server check box.

        b. Select the Apply button to apply the changes.

      4. To create, edit and delete infrastructure branches, please refer to the Manage Branches page.

      Manage Libraries⚓︎

      This section describes the subsequent possible actions that can be performed with the newly added or existing libraries.

      Check and Remove Library⚓︎

      As soon as the library is successfully provisioned, the following will be created:

      • A Library Codebase type will appear in the Codebase list of the Components section.
      • With the Create strategy, a new project will be generated on GitHub or another integrated VCS. When Clone is chosen, the repository will be forked from the original and copied to the KubeRocketCI-integrated repository. If Import is selected, the platform connects to the chosen repository.

      Info

      To navigate quickly to OpenShift, Tekton, Gerrit, SonarQube, Nexus, and other resources, click the Overview section on the navigation bar and hit the necessary link.

The added library will be listed in the Libraries list, allowing you to do the following:

Library menu

• Library status - displays the library status. Can be red or green depending on whether the KubeRocketCI portal managed to connect to the Git Server with the specified credentials.
• Library name (clickable) - displays the library name set during the library creation.
• Open documentation - opens the library-related documentation page.
• Enable filtering - enables filtering by library name and the namespace this custom resource is located in.
      • Create new library - displays the Create new component menu.
      • Edit library - edit the library by selecting the options icon next to its name in the libraries list, and then selecting Edit. For details see the Edit Existing Library section.
      • Delete Library - remove library by clicking the vertical ellipsis button and then selecting Delete.

        Note

        The library that is used in a CD pipeline cannot be removed.

      There are also options to sort the libraries:

      • Sort the existing libraries in a table by clicking the sorting icons in the table header. Sort the libraries alphabetically by their name, language, build tool, framework, and CI tool. You can also sort the libraries by their status: Created, Failed, or In progress.
      • Select a number of libraries displayed per page (15, 25 or 50 rows) and navigate between pages if the number of libraries exceeds the capacity of a single page.

      Edit Existing Library⚓︎

      KubeRocketCI portal provides the ability to enable, disable or edit the Jira Integration functionality for libraries.

      1. To edit a library directly from the Libraries overview page or when viewing the library data:

        • Select Edit in the options icon menu:

    Edit library on the libraries overview page

        • Select Edit in the library details menu:

    Edit library when viewing the library data

      2. To enable Jira integration, in the Edit Library dialog do the following:

  Edit library

        a. Mark the Integrate with Jira server check box and fill in the necessary fields. Please see the steps d-h of the Add Library page.

        b. Select the Apply button to apply the changes.

      3. To disable Jira integration, in the Edit Library dialog do the following:

        a. Clear the Integrate with Jira server check box.

        b. Select the Apply button to apply the changes.

        As a result, the necessary changes will be applied.

      4. To create, edit and delete library branches, please refer to the Manage Branches page.

      Manage Branches⚓︎

      This page describes how to manage branches in the created component, whether it is an application, library, autotest or infrastructure.

      Add New Branch⚓︎

      Note

      When working with libraries, pay attention when specifying the branch name: the branch name is involved in the formation of the library version, so it must comply with the Semantic Versioning rules for the library.

When adding a component, the default branch is the master branch. In order to add a new branch, follow the steps below:

      1. Navigate to the Branches block by clicking the component name link in the Components list.

      2. Click the + Create button:

  Add branch

3. Click Edit YAML in the upper-right corner of the dialog to open the YAML editor and add a branch (a sketch of the underlying resource is shown after these steps). Otherwise, fill in the required fields in the dialog:

  New branch

        a. Release Branch - select the Release Branch check box if you need to create a release branch.

        b. Branch name - type the branch name. Pay attention that this field remains static if you create a release branch. For the Clone and Import strategies: if you want to use the existing branch, enter its name into this field.

        c. From Commit Hash - paste the commit hash from which the branch will be created. For the Clone and Import strategies: Note that if the From Commit Hash field is empty, the latest commit from the branch name will be used.

        d. Branch version - enter the necessary branch version for the artifact. The Release Candidate (RC) postfix is concatenated to the branch version number.

        e. Default branch version - type the branch version that will be used in a master branch after the release creation. The Snapshot postfix is concatenated to the master branch version number.

  f. Click the Apply button and wait until the new branch is added to the list.

        Info

  Adding a new branch in this way applies in the context of the EDP versioning type.

      The default component repository is cloned and changed to the new indicated version before the build, i.e. the new indicated version will not be committed to the repository; thus, the existing repository will keep the default version.
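
For reference, the YAML editor operates on a CodebaseBranch custom resource. Below is a minimal sketch, assuming the v2.edp.epam.com/v1 API group, with hypothetical component and branch names:

    apiVersion: v2.edp.epam.com/v1
    kind: CodebaseBranch
    metadata:
      name: my-go-app-feature-x
      namespace: edp
    spec:
      codebaseName: my-go-app
      branchName: feature-x
      fromCommit: ""        # empty means the latest commit is used
      release: false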

      Build Branch⚓︎

In order to build a branch from the latest commit, do the following:

1. Navigate to the Branches block by clicking the component name link in the Components list.

      2. Click the Build button:

  Build branch

      The pipeline run status is displayed near the branch name in the Branches block:

Pipeline run status in KubeRocketCI portal

      The corresponding item appears on the Tekton Dashboard in the PipelineRuns section:

Pipeline run status in Tekton

Alternatively, click the tree diagram icon to observe the real-time status of the pipeline run:

Tree diagram icon

      The tree diagram window is presented below:

Tree diagram window

      Delete Branch⚓︎

      Note

      The default master/main branch cannot be removed.

      In order to delete the added branch with the corresponding record in the KubeRocketCI portal database, do the following:

      1. Navigate to the Branches block by clicking the component name link in the components list.
      2. Select the Actions icon related to the necessary branch and then click Delete:

  Delete branch

      Manage Container Registries⚓︎

This guide provides instructions on integrating a container registry with the KubeRocketCI platform.

      Supported Registry Providers⚓︎

The following list shows the registry services supported on OpenShift and Kubernetes clusters:

• AWS ECR - supported on OpenShift and Kubernetes.
• DockerHub - supported on OpenShift and Kubernetes.
• Harbor - supported on OpenShift and Kubernetes.
• OpenShift (OCR) - supported on OpenShift only.
• Nexus - supported on OpenShift and Kubernetes.
• GitHub (GHCR) - supported on OpenShift and Kubernetes.

      Add Container Registry⚓︎

      Follow a three-step process to integrate a container registry in KubeRocketCI:

1. Navigate to KubeRocketCI -> Configuration -> Registry:

  Registry settings

      2. Select Registry Provider and enter the required details.

      3. Confirm settings by clicking the Save button.

      The required fields vary depending on which container registry is chosen:

AWS ECR settings

• Registry Endpoint - the (AWS) Elastic Container Registry endpoint in the format xxxxxxxxxxxx.dkr.ecr.<region>.amazonaws.com, where xxxxxxxxxxxx is your AWS account ID and <region> is where your ECR is hosted.
• Registry Space - the suffix project name in the registry.
• AWS Region - the geographic area where the (AWS) Elastic Container Registry repository is hosted.
• Authentication/IRSA Role ARN - the IAM role with a policy for interacting with ECR from your Kubernetes cluster.

DockerHub settings

• Registry Space - the unique identifier/name of the user or company linked to your DockerHub account.
• User - the user account ID or community user account ID with push permission.
• Password/Token - the Password/Token corresponding to your DockerHub account. It is recommended to use a Token for security purposes.
• Use the Push Account's credentials checkbox - check this to use the same account for pulling and pushing operations. If unchecked, enter the user account ID and Password/Token for your DockerHub account or a community user account ID with pull permission.

Harbor settings

• Registry Endpoint - the Harbor registry endpoint URL, for example, registry.example.com.
• Registry Space - the project name in the registry.
• User - the robot account name with push permissions.
• Password/Token - the secret corresponding to your Harbor account.
• Use the Push Account's credentials checkbox - check this to use the same account for pulling and pushing operations. If unchecked, provide an account name with pull permissions.

OpenShift settings

• Registry Endpoint - the OpenShift service registry endpoint URL (e.g., image-registry.openshift-image-registry.svc:5000).
• Project - the project name in the registry.
• Password/Token - the password for the user who has pull authorization privileges in your OpenShift container image registry.

Nexus settings

• Registry Endpoint - the Nexus service registry endpoint URL (e.g., image-registry.nexus-image-registry.svc:5000).
• Repository - the Nexus repository that corresponds to your project.
• User - the username with push permissions.
• Password/Token - the credentials used to authenticate your access to the container registry.

GitHub container registry settings

• Registry Space - the unique identifier/name of the user or company linked to your GitHub account.
• User - the user account ID or community user account ID with push permission.
• Password/Token - the Token corresponding to your GitHub account. The minimal set of permissions required for the token is described in Integrate GitHub/GitLab in Tekton.
• Use the Push Account's credentials checkbox - check this to use the same account for pulling and pushing operations. If unchecked, enter the user account ID and Token for your GitHub account or a community user account ID with pull permission.

      Remove Container Registry⚓︎

      To remove container registry integration from KubeRocketCI, follow the steps below:

      Warning

Proceed with caution: removing registry settings might disrupt your CI/CD process. All new components created after changing the registry, such as Components and Environments, will work out of the box. To work with existing codebases and pipelines, familiarize yourself with the change container registry guide.

1. Navigate to KubeRocketCI -> Configuration -> Registry.

2. Click the Reset registry button, type the confirmation word, and then click Confirm:

Registry settings

      Manage Environments⚓︎

This page describes actions that can be performed on an already created environment. If no environments have been created yet, navigate to the Add Environment page:

Environments page

• Environment status - displays the environment status. Can be red or green depending on whether the KubeRocketCI portal managed to connect to the Git Server with the specified credentials.
• Environment name (clickable) - displays the environment name set during the environment creation.
• Open documentation - opens the documentation that leads to this page.
• Enable filtering - enables filtering by environment name and the namespace this environment is located in.
• Create new environment - displays the Create new component menu.
• Edit environment - edit the environment by selecting the options icon next to its name in the environment list, and then selecting Edit. For details see the Edit Existing Environment section.
• Delete environment - remove the environment by clicking the vertical ellipsis button and then selecting Delete.

        Note

        Please keep in mind that after deleting the environment, all the created resources within the environment will be deleted.

      View Environment Details⚓︎

      To view environment details, click the environment name in the environments list. Once clicked, the following data will be displayed:

Environment details

• Filters - enables filtering by stage name, stage applications, and stage health status.
• Open environment in Argo CD - opens the corresponding resource in Argo CD.
• Edit environment - allows you to edit some parameters of the environment.
• Delete environment - allows you to remove the environment.
• Create new stage - displays the Create stage menu.
• Stage name (clickable) - opens the stage details page.
• Stage status - displays the status of the created stage.
• Application name (clickable) - opens the details of the application that is deployed within the stage.
• Application deployment status - displays the status of the deployed application.
• Open application logs - opens the application container logs.
• Open application terminal - opens the container terminal window.
• Open application resource in Argo CD - opens a new tab with Argo CD resources related to the application.
• Open stage in Argo CD / Grafana / Kibana - allows you to view the stage in Argo CD, Grafana, or Kibana.

      Edit Existing Environment⚓︎

      Edit the environment directly from the environment overview page or when viewing the environment data:

      1. Select Edit in the options icon menu next to the environment name:

  Edit environment when viewing the environment data

2. Apply the necessary changes (edit the list of applications for deployment, application branches, and promotion in the pipeline). Add new extra stages by clicking the plus sign icon and filling in the application branch and promotion in the pipeline.

  Edit environment dialog

      3. Click the Apply button to confirm the changes.

      Add a New Stage⚓︎

To create a new stage for an existing environment, follow the steps below:

      1. Navigate to the Stages block by clicking the environment name link in the environments list.

      2. Click the Create stage button:

  Add environment stage

3. Fill in the required fields in the dialog. Alternatively, click Edit YAML in the upper-right corner of the Create stage dialog to open the YAML editor and add a stage; an illustrative sketch of such a definition follows these steps. Please see the Stages Menu section for details.

      4. Click the Apply button.
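
For illustration, a stage definition opened in the YAML editor might look similar to the sketch below. The field names follow the platform's Stage custom resource, but every value here (names, namespace, order) is a placeholder rather than an authoritative schema:

  apiVersion: v2.edp.epam.com/v1
  kind: Stage
  metadata:
    name: mypipeline-qa            # typically <environment name>-<stage name>
  spec:
    cdPipeline: mypipeline         # the environment this stage belongs to
    name: qa
    namespace: edp-mypipeline-qa   # pattern: <edp-name>-<pipeline-name>-<stage-name>
    clusterName: in-cluster
    order: 1                       # position of the stage within the environment
    qualityGates:
      - qualityGateType: manual    # manual approval gate
        stepName: approve
    source:
      type: default
    triggerType: Manual            # or Auto; see the Stages Menu section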

      Edit Stage⚓︎

To edit a stage of an existing environment, follow the steps below:

      1. Navigate to the Stages block by clicking the environment name link in the environments list.

  Edit environment stage

      2. Select the options icon related to the necessary stage and then select Edit.

  Edit environment stage dialog

      3. In the Edit Stage dialog, change the stage trigger type. See more about this field in the Stages Menu section.

      4. Click the Apply button.

      Delete Stage⚓︎

      Note

      You cannot remove the last stage, as the environment does not exist without at least one.

To delete a stage of an existing environment, follow the steps below:

      1. Navigate to the Stages block by clicking the environment name link in the environments list.

      2. Click the name of the stage that needs to be deleted:

  Delete environment stage

      3. Click the recycle bin button to open the stage deletion menu:

  Delete environment stage

      View Stage Data⚓︎

To view stage data for an existing environment, follow the steps below:

      1. Navigate to the Stages block by clicking the environment name link in the environments list.

  Expand environment stage

2. Click the stage name. The following blocks will be displayed:

  Environment stage overview

  a. Applications - displays the status of the applications related to the stage and allows deploying the applications. Application health and sync statuses are returned from the Argo CD tool.
  b. Pipelines - displays all the deploy pipeline runs launched for this stage.
  c. Monitoring - opens the Grafana window that displays various metrics.

      Deploy Application⚓︎

      To deploy an application, follow the steps below:

Deploy the promoted application

      1. Navigate to the Applications block of the stage and select an application.

      2. Select the image stream version from the drop-down list.

3. (Optional) Enable setting custom values for Helm Charts; a minimal sketch of such an override follows the Info note below. For more details, please refer to the Manage GitOps page.

      4. Click Deploy. The application will be deployed in the Argo CD tool as well.

      Info

When using the OpenShift internal registry, if the deployment fails with the ImagePullBackOff error, delete the pod that was created for this application.
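
When custom values are enabled, chart parameters can be overridden before deployment. The sketch below is a minimal, hedged example assuming the application chart exposes these common keys; the actual keys depend on your chart and are managed as described on the Manage GitOps page:

  # values override for the deployed application (illustrative keys only)
  replicaCount: 2
  resources:
    limits:
      memory: 256Mi
  ingress:
    enabled: true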

To update the application, use the Deploy button:

Update the application

      To uninstall the application, click the Uninstall button:

Uninstall the application

      As a result, the application will be updated or uninstalled in the Argo CD tool as well.

      Note

In a nutshell, the Deploy button updates your image version in the Helm chart, whereas the Uninstall button deletes the Helm chart from the namespace where the Argo CD application is deployed.

      Troubleshoot Application⚓︎

There are a couple of KubeRocketCI portal capabilities that help monitor and troubleshoot deployed applications, namely the terminal and logs.

To inspect the deployed application in the KubeRocketCI portal, take the following steps:

      1. Open the application logs by clicking the Show Logs button:

  Show Logs button

      2. Inspect the shown logs:

  Inspect Logs

      3. Open the application terminal by clicking the Show Terminal button:

  Show Terminal button

4. Use the terminal to fix the problem, if any:

  Inspect application

      Marketplace Overview⚓︎

The KubeRocketCI marketplace offers a range of Templates: predefined sets of tools and settings for creating software. These Templates speed up development, minimize errors, and ensure consistency. A key KubeRocketCI marketplace feature is customization. Organizations can create and share their own Templates, finely tuned to their needs. Each Template serves as a tailored blueprint of tools and settings.

      These tailored Templates include preset CI/CD pipelines, automating your development workflows. From initial integration to final deployment, these processes are efficiently managed. Whether for new applications or existing ones, these Templates enhance processes, save time, and ensure consistency.

To see the Marketplace section, navigate to the Marketplace tab. The general view of the Marketplace section is described below:

Marketplace section (listed view)

• Marketplace templates - all the components the marketplace can offer;
• Template properties - the item summary that shows the type, category, language, framework, build tool, and maturity;
• Enable/disable filters - allows users to filter items by name or by the namespace they are available in;
• Change view - allows switching from the listed view to the tiled one and vice versa. See the view options for details.

It is also possible to switch to the tiled view instead of the listed one:

Marketplace section (tiled view)

      To view the details of a marketplace item, simply click its name:

Item details

The details window shows supplemental information, such as the item's author, keywords, release version, and the link to the repository it is located in. The window also contains the Create from template button that allows users to create a component from the chosen template. The procedure of creating new components is described on the Add Component via Marketplace page.

      Use Open Policy Agent⚓︎

      Open Policy Agent (OPA) is a policy engine that provides:

      • High-level declarative policy language Rego;
      • API and tooling for policy execution.

EPAM Delivery Platform provides Open Policy Agent support, allowing you to work with Open Policy Agent bundles that are processed by means of stages in the Code Review and Build pipelines. These pipelines are expected to be created after the Rego OPA Library is added.

      Code Review Pipeline Stages⚓︎

      In the Code Review pipeline, the following stages are available:

      1. checkout stage, a standard step during which all files are checked out from a selected branch of the Git repository.

      2. tests stage containing a script that performs the following actions:

        2.1. Runs policy tests.

        2.2. Converts OPA test results into JUnit format.

        2.3. Publishes JUnit-formatted results.

      Build Pipeline Stages⚓︎

      In the Build pipeline, the following stages are available:

      1. checkout stage, a standard step during which all files are checked out from a selected branch of the Git repository.

2. get-version optional stage, a step where the library version is determined via one of the following:

        2.1. Standard EDP versioning functionality.

  2.2. Manually specified version. In this case, a .manifest file in the root directory MUST be provided. The file must contain a JSON document with a revision field; a minimal example is shown after this list.

3. tests stage containing a script that performs the following actions:

  3.1. Runs policy tests.

  3.2. Converts OPA test results into JUnit format.

  3.3. Publishes JUnit-formatted results.

4. git-tag stage, a standard step where the git branch is tagged with a version.
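
For reference, the minimal .manifest file mentioned in step 2.2 contains a single JSON document with the revision field:

  {
    "revision": "1.0.0"
  }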

      Prepare for Release⚓︎

      After the necessary applications are added to EDP, they can be managed via the Admin Console. To prepare for the release, create a new branch from a selected commit with a set of CI pipelines (Code Review and Build pipelines), launch the Build pipeline, and add a new CD pipeline as well.

      Note

Please refer to the Add Application and Add CD Pipeline pages for the details on how to add an application or a CD pipeline.

      Become familiar with the following preparation steps for release and a CD pipeline structure:

      • Create a new branch
      • Launch the Build pipeline
      • Add a new CD pipeline
      • Check CD pipeline structure

      Create a New Branch⚓︎

      1. Open Gerrit via the Admin Console Overview page to have this tab available in a web browser.

2. In the Admin Console, open the Applications section and click an application from the list to create a new branch.

3. After clicking the application name, scroll down to the Branches menu and click the Create button to open the Create New Branch dialog box. Fill in the Branch Name field by typing a branch name. To create the branch from a specific commit:

        • Open the Gerrit tab in the web browser, navigate to Projects → List → select the application → Branches → gitweb for a necessary branch.
  • Select the commit that will be the last one included in the new branch.
  • Copy the commit hash to the clipboard.
      4. Paste the copied hash to the From Commit Hash field and click Proceed.

      Note

      If the commit hash is not added to the From Commit Hash field, the new branch will be created from the head of the master branch.

      Launch the Build Pipeline⚓︎

      1. After the new branches are added, open the details page of every application and click the CI link that refers to Jenkins.

        Note

Adding a new branch may take some time. As soon as the new branch is created, it will be displayed in the list of the Branches menu.

2. To build a new version of a corresponding Docker container (an image stream in OpenShift terms) for the new branch, start the Build pipeline. In Jenkins, select the new branch tab and click the link to the Build pipeline.

      3. Navigate to the Build with Parameters option and click the Build button to launch the Build pipeline.

        Warning

The predefined default parameters should not be changed when triggering the Build pipeline; otherwise, the pipeline will fail.

      Add a New CD Pipeline⚓︎

1. Add a new CD pipeline and indicate the new release branch using the Admin Console tool. Pay attention to the Applications menu: the necessary application(s) should be selected there, as well as the necessary branch(es) from the drop-down list.

        Note

        For the details on how to add a CD pipeline, please refer to the Add CD Pipeline page.

2. As soon as the Build pipelines are successfully passed in Jenkins, the Docker Registry, which is used in EDP by default, will contain the new image stream (Docker container in Kubernetes terms) version that corresponds to the current branch.

      3. Open the Kubernetes/OpenShift page of the project via the Admin Console Overview page → go to CodebaseImageStream (in OpenShift, go to Builds → Images) → check whether the image streams are created under the specific name (the combination of the application and branch names) and the specific tags are added. Click every image stream link.

      Check CD Pipeline Structure⚓︎

When the CD pipeline is added through the Admin Console, it becomes available in the CD pipelines list. Every pipeline has a details page with additional information. To explore the CD pipeline structure, follow the steps below:

1. Open the Admin Console, navigate to the Continuous Delivery section, and click the newly created CD pipeline name.

      2. Discover the CD pipeline components:

        • Applications - the list of applications with the image streams and links to Jenkins for the respective branch;
        • Stages - a set of stages with the defined characteristics and links to Kubernetes/OpenShift project;

        Note

        Initially, an environment is empty and does not have any deployment unit. When deploying the subsequent stages, the artifacts of the selected versions will be deployed to the current project and the environment will display the current stage status. The project has a standard pattern: ‹edp-name›-‹pipeline-name›-‹stage-name›.

        • Deployed Versions - the deployment status of the specific application and the predefined stage.

      Launch CD Pipeline Manually⚓︎

      Follow the steps below to deploy the QA and UAT application stages:

1. As soon as the Build pipelines for both applications are successfully passed, the new version of the Docker container will appear, thus allowing the CD pipeline to be launched. Simply navigate to Continuous Delivery and click the pipeline name to open it in Jenkins.

      2. Click the QA stage link.

      3. Deploy the QA stage by clicking the Build Now option.

4. After the initialization step starts, if another menu is opened, the Pause for Input option will appear. Select the application version in the drop-down list and click Proceed. The pipeline passes the following stages:

  • Init - initialization of the Jenkins pipeline outputs with the stages, which are Groovy scripts that execute the current code;
  • Deploy - the deployment of the selected versions of the Docker container and third-party services. As soon as the Deploy pipeline stage is completed, the respective environment will be deployed;
  • Approve - the verification stage that allows you to Proceed with or Abort this stage;
  • Promote-images - the creation of the new image streams for the current versions with the pattern combination: [pipeline name]-[stage name]-[application name]-[verified];

        After all the stages are passed, the new image streams will be created in the Kubernetes/OpenShift with the new names.

      5. Deploy the UAT stage, which takes the versions that were verified during the QA stage, by clicking the Build Now option, and select the necessary application versions. The launch process is the same as for all the deploy pipelines.

      6. To get the status of the pipeline deployment, open the CD pipeline details page and check the Deployed versions state.

      CD Pipeline as a Team Environment⚓︎

Admin Console allows creating a CD pipeline with a part of the application set as a team environment. To do this, perform the following steps:

      1. Open the Continuous Delivery section → click the Create button → enter the pipeline name (e.g. team-a) → select ONE application and choose the master branch for it → add one DEV stage.
      2. As soon as the CD pipeline is added to the CD pipelines list, its details page will display the links to Jenkins and Kubernetes/OpenShift.
      3. Open Jenkins and deploy the DEV stage by clicking the Build Now option.
4. Kubernetes/OpenShift keeps an independent environment that allows checking the new versions, thus speeding up the development process when working with several microservices.

      As a result, the team will have the same abilities to verify the code changes when developing and during the release.

      Manage Quick Links⚓︎

The Links section is designed to store all the component references in one place. An additional feature is displaying these references directly on the Overview page of the KubeRocketCI portal. Some of the links are also located on the application details and stage details pages. Integrating some of the tools will also make these links clickable.

      To create a Quick Link, follow the steps below:

      1. Navigate to KubeRocketCI -> Configuration -> Links and click the + Create Link button:

  Links section

2. In the window that appears, insert the link name, URL, and SVG icon in base64 format. Click the checkbox if you want your link to be displayed on the Overview page and click Apply (a hypothetical sketch of the stored resource follows these steps):

  Create reference menu

      3. If the Show on Overview Page option is selected, the image will be displayed on the Overview page among the other links:

  Added link
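
Under the hood, each Quick Link is stored as a Kubernetes resource holding the entered values. The sketch below is purely illustrative: the kind, apiVersion, and field names are assumptions rather than the authoritative schema:

  apiVersion: v1.edp.epam.com/v1       # assumed group/version
  kind: QuickLink                      # assumed resource kind
  metadata:
    name: my-tool
  spec:
    url: https://my-tool.example.com   # the URL entered in the dialog
    icon: PHN2ZyB4bWxucy...            # base64-encoded SVG, truncated placeholder
    visible: true                      # maps to the Show on Overview Page checkbox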

      To edit a Quick Link, follow the steps below:

      1. Navigate to KubeRocketCI -> Configuration -> Links. Click the three-dot menu and select Edit:

        Edit link button
        Edit link button

      2. Edit the parameters set for the link:

        Edit link menu
        Edit link menu

To delete a Quick Link, follow the steps below:

      1. Navigate to KubeRocketCI -> Configuration -> Links. Click the three-dot menu and select Delete:

  Delete link button

      2. In the Confirm deletion window, enter the name of the link and click Confirm:

  Delete link menu

      CI Pipelines for Terraform⚓︎

EPAM Delivery Platform provides Terraform support through a separate component type called Infrastructure. The Infrastructure codebase type allows you to work with Terraform code that is processed by means of stages in the Code Review and Build pipelines.

      Pipeline Stages for Terraform⚓︎

Under the hood, the Infrastructure codebase type, namely Terraform, looks quite similar to other codebase types. Its distinguishing characteristic is a stage called terraform-check in both the Code Review and Build pipelines. This stage runs the pre-commit activities, which in turn run the following commands and tools:

      1. Terraform fmt - the first step of the stage is basically the terraform fmt command. The terraform fmt command automatically updates the formatting of Terraform configuration files to follow the standard conventions and make the code more readable and consistent.

      2. Lock provider versions - locks the versions of the Terraform providers used in the project. This ensures that the project uses specific versions of the providers and prevents unexpected changes from impacting the infrastructure due to newer provider versions.

3. Terraform validate - checks the syntax and validity of the Terraform configuration files, scanning them for syntax errors and internal inconsistencies.

      4. Terraform docs - generates human-readable documentation for the Terraform project.

5. Tflint - an additional validation step using the tflint linter, providing more in-depth checks on top of what the terraform validate command does.

      6. Checkov - runs the checkov command against the Terraform codebase to identify any security misconfigurations or compliance issues.

      7. Tfsec - another security-focused validation step using the tfsec command. Tfsec is a security scanner for Terraform templates that detects potential security issues and insecure configurations in the Terraform code.

      Note

The commands and their attributes are defined in the .pre-commit-config.yaml file; an illustrative sketch of such a file follows.
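
For illustration, a .pre-commit-config.yaml wiring up the tools above might look like the sketch below. It is based on the community pre-commit-terraform hooks; the revision and hook attributes in an actual codebase may differ:

  repos:
    - repo: https://github.com/antonbabenko/pre-commit-terraform
      rev: v1.77.0                      # pin to the revision your codebase uses
      hooks:
        - id: terraform_fmt             # formatting
        - id: terraform_providers_lock  # lock provider versions
        - id: terraform_validate        # syntax and validity checks
        - id: terraform_docs            # generate documentation
        - id: terraform_tflint          # in-depth linting
        - id: terraform_checkov         # security and compliance scan
        - id: terraform_tfsec           # security scanner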
