From 0b1bb42fda9292f9bf40d76e3eb09ac593292a7d Mon Sep 17 00:00:00 2001 From: Fabien Boucher Date: Mon, 11 Dec 2023 15:11:38 +0000 Subject: [PATCH] routes - move all routes from . to / - zuul: https:///zuul - nodepool: https:///nodepool/(builds|api) - logserver: https:///logs Extra Routes from Prometheus and Gerrit still use .. This change also updates: - static certificate management - LE certificate management Change-Id: I228d1d124ccbc8350ca6b23ae907f81d6c1aba6e --- api/v1/logserver_types.go | 2 +- cli/sfconfig/cmd/gerrit/gerrit.go | 4 +- cli/sfconfig/cmd/sfprometheus/sfprometheus.go | 2 +- cli/sfconfig/cmd/ssl/create_ssl_cert.go | 17 +++----- ...softwarefactory-project.io_logservers.yaml | 2 +- controllers/libs/base/base.go | 4 +- controllers/logserver_controller.go | 6 ++- controllers/nodepool.go | 12 ++++-- controllers/softwarefactory_controller.go | 15 ++++--- .../static/git-server/update-system-config.sh | 4 +- controllers/utils.go | 43 +++++++++---------- controllers/zuul.go | 19 +++----- doc/deployment/certificates.md | 43 ++++++++----------- doc/deployment/getting_started.md | 18 +++----- doc/deployment/nodepool.md | 2 +- doc/developer/getting_started.md | 7 +-- doc/reference/api/index.md | 2 +- doc/reference/cli/index.md | 1 - doc/user/nodepool_config_repository.md | 4 +- playbooks/group_vars/all.yaml | 14 +++--- .../check-service-uri/tasks/main.yaml | 14 +++--- .../tasks/ensure-config-check-failure.yaml | 2 +- .../tasks/ensure-config-update-success.yaml | 2 +- .../tasks/ensure-zuul-console-success.yaml | 4 +- .../tasks/main.yaml | 6 +-- .../config-update-zuul/defaults/main.yaml | 4 +- .../config-update-zuul/tasks/main.yaml | 4 +- .../health-check/pod-spawning/tasks/main.yaml | 4 +- .../defaults/main.yaml | 2 + .../tasks/main.yaml | 25 ++--------- .../defaults/main.yaml | 5 +-- .../tasks/add_ssl_secret.yaml | 1 - .../tasks/check_route.yaml | 24 +++++++---- .../test-custom-route-certs/tasks/main.yaml | 3 +- .../zuul-authenticators/tasks/main.yaml | 4 +- 
.../zuul-components/tasks/main.yaml | 2 +- .../zuul-connections/tasks/main.yaml | 4 +- tools/microshift/local-setup.yaml | 5 +-- 38 files changed, 152 insertions(+), 184 deletions(-) create mode 100644 roles/health-check/test-cert-manager-letsencrypt/defaults/main.yaml diff --git a/api/v1/logserver_types.go b/api/v1/logserver_types.go index f7698917..c3bf5f17 100644 --- a/api/v1/logserver_types.go +++ b/api/v1/logserver_types.go @@ -12,7 +12,7 @@ import ( // LogServerSpec defines the desired state of LogServer type LogServerSpec struct { - // The fully qualified domain name to use with the log server. Logs will be served at https://logserver.`FQDN` + // The fully qualified domain name to use with the log server. Logs will be served at https://`FQDN`/logs FQDN string `json:"fqdn"` // LetsEncrypt settings for enabling using LetsEncrypt for Routes/TLS LetsEncrypt *LetsEncryptSpec `json:"LetsEncrypt,omitempty"` diff --git a/cli/sfconfig/cmd/gerrit/gerrit.go b/cli/sfconfig/cmd/gerrit/gerrit.go index 9c4f64d7..e31f78db 100644 --- a/cli/sfconfig/cmd/gerrit/gerrit.go +++ b/cli/sfconfig/cmd/gerrit/gerrit.go @@ -364,8 +364,8 @@ func (g *GerritCMDContext) ensureGerritSTS() { func (g *GerritCMDContext) ensureGerritIngresses() { name := "gerrit" - route := base.MkHTTPSRoute(name, g.env.Ns, name, - gerritHTTPDPortName, "/", gerritHTTPDPort, map[string]string{}, g.fqdn, nil) + route := base.MkHTTPSRoute(name, g.env.Ns, name+"."+g.fqdn, + gerritHTTPDPortName, "/", gerritHTTPDPort, map[string]string{}, nil) g.ensureRoute(name, route) } diff --git a/cli/sfconfig/cmd/sfprometheus/sfprometheus.go b/cli/sfconfig/cmd/sfprometheus/sfprometheus.go index c2313117..ce2571a8 100644 --- a/cli/sfconfig/cmd/sfprometheus/sfprometheus.go +++ b/cli/sfconfig/cmd/sfprometheus/sfprometheus.go @@ -231,7 +231,7 @@ func EnsurePrometheusService(env *utils.ENV) { func (p *PromCMDContext) EnsurePrometheusRoute() { route := base.MkHTTPSRoute( - prometheusName, p.env.Ns, prometheusName, prometheusName, "/", 
prometheusPort, map[string]string{}, p.fqdn, nil) + prometheusName, p.env.Ns, prometheusName+"."+p.fqdn, prometheusName, "/", prometheusPort, map[string]string{}, nil) err := p.env.Cli.Get(p.env.Ctx, client.ObjectKey{ Name: prometheusName, Namespace: p.env.Ns, diff --git a/cli/sfconfig/cmd/ssl/create_ssl_cert.go b/cli/sfconfig/cmd/ssl/create_ssl_cert.go index 0094364c..4815c4be 100644 --- a/cli/sfconfig/cmd/ssl/create_ssl_cert.go +++ b/cli/sfconfig/cmd/ssl/create_ssl_cert.go @@ -24,10 +24,9 @@ import ( ) func ensureSSLSecret(env *utils.ENV, serviceCAContent []byte, - serviceCertContent []byte, serviceKeyContent []byte, serviceName string, -) { + serviceCertContent []byte, serviceKeyContent []byte) { var secret apiv1.Secret - secretName := sf.GetCustomRouteSSLSecretName(serviceName) + secretName := sf.CustomSSLSecretName data := map[string][]byte{ "CA": serviceCAContent, "crt": serviceCertContent, @@ -89,7 +88,7 @@ func verifySSLCert(serviceCAContent []byte, serviceCertContent []byte, } func CreateServiceCertSecret(sfEnv *utils.ENV, sfNamespace string, - serviceName string, sfServiceCA string, sfServiceCert string, + sfServiceCA string, sfServiceCert string, sfServiceKey string, serverName string, ) { kubernetesEnv := utils.ENV{Cli: sfEnv.Cli, Ctx: sfEnv.Ctx, Ns: sfNamespace} @@ -117,7 +116,7 @@ func CreateServiceCertSecret(sfEnv *utils.ENV, sfNamespace string, } ensureSSLSecret(&kubernetesEnv, serviceCAContent, serviceCertContent, - serviceKeyContent, serviceName) + serviceKeyContent) } @@ -129,7 +128,6 @@ var CreateCertificateCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { sfNamespace, _ := cmd.Flags().GetString("sf-namespace") sfContext, _ := cmd.Flags().GetString("sf-context") - serviceName, _ := cmd.Flags().GetString("sf-service-name") sfServiceCA, _ := cmd.Flags().GetString("sf-service-ca") sfServiceCert, _ := cmd.Flags().GetString("sf-service-cert") sfServiceKey, _ := cmd.Flags().GetString("sf-service-key") @@ -139,9 +137,8 @@ var 
CreateCertificateCmd = &cobra.Command{ Ns: sfNamespace, } conf := config.GetSFConfigOrDie() - serverName := serviceName + "." + conf.FQDN - CreateServiceCertSecret(&sfEnv, sfNamespace, serviceName, sfServiceCA, - sfServiceCert, sfServiceKey, serverName) + CreateServiceCertSecret(&sfEnv, sfNamespace, sfServiceCA, + sfServiceCert, sfServiceKey, conf.FQDN) }, } @@ -151,8 +148,6 @@ func init() { "Name of the namespace to copy the kubeconfig, or '-' for stdout") CreateCertificateCmd.Flags().StringP("sf-context", "", "", "The kubeconfig context of the sf-namespace, use the default context by default") - CreateCertificateCmd.Flags().StringP("sf-service-name", "", "", - "The SF service name for the SSL cert like Zuul, Gerrit, Logserver etc.") CreateCertificateCmd.Flags().StringP("sf-service-ca", "", "", "Path for the service CA certificate") CreateCertificateCmd.Flags().StringP("sf-service-cert", "", "", diff --git a/config/crd/bases/sf.softwarefactory-project.io_logservers.yaml b/config/crd/bases/sf.softwarefactory-project.io_logservers.yaml index f34cba07..aa2959d1 100644 --- a/config/crd/bases/sf.softwarefactory-project.io_logservers.yaml +++ b/config/crd/bases/sf.softwarefactory-project.io_logservers.yaml @@ -68,7 +68,7 @@ spec: type: string fqdn: description: The fully qualified domain name to use with the log server. 
- Logs will be served at https://logserver.`FQDN` + Logs will be served at https://`FQDN`/logs type: string settings: description: General runtime settings for the log server diff --git a/controllers/libs/base/base.go b/controllers/libs/base/base.go index d6ddcf16..be32632c 100644 --- a/controllers/libs/base/base.go +++ b/controllers/libs/base/base.go @@ -273,7 +273,7 @@ func MkHeadlessServicePod(name string, ns string, podName string, ports []int32, // MkHTTPSRoute produces a Route on top of a Service func MkHTTPSRoute( name string, ns string, host string, serviceName string, path string, - port int, annotations map[string]string, fqdn string, customTLS *apiroutev1.TLSConfig) apiroutev1.Route { + port int, annotations map[string]string, customTLS *apiroutev1.TLSConfig) apiroutev1.Route { tls := apiroutev1.TLSConfig{ InsecureEdgeTerminationPolicy: apiroutev1.InsecureEdgeTerminationPolicyRedirect, Termination: apiroutev1.TLSTerminationEdge, @@ -289,7 +289,7 @@ func MkHTTPSRoute( }, Spec: apiroutev1.RouteSpec{ TLS: &tls, - Host: host + "." + fqdn, + Host: host, To: apiroutev1.RouteTargetReference{ Kind: "Service", Name: serviceName, diff --git a/controllers/logserver_controller.go b/controllers/logserver_controller.go index 989de23a..9d6556e0 100644 --- a/controllers/logserver_controller.go +++ b/controllers/logserver_controller.go @@ -342,8 +342,10 @@ func (r *LogServerController) DeployLogserver() sfv1.LogServerStatus { pvcReadiness := r.reconcileExpandPVC(logserverIdent+"-"+logserverIdent+"-0", r.cr.Spec.Settings.Storage) routeReady := r.ensureHTTPSRoute( - r.cr.Name+"-logserver", logserverIdent, - logserverIdent, "/", httpdPort, map[string]string{}, r.cr.Spec.FQDN, r.cr.Spec.LetsEncrypt) + r.cr.Name+"-logserver", r.cr.Spec.FQDN, + logserverIdent, "/logs/", httpdPort, map[string]string{ + "haproxy.router.openshift.io/rewrite-target": "/", + }, r.cr.Spec.LetsEncrypt) // TODO(mhu) We may want to open an ingress to port 9100 for an external prometheus instance. 
// TODO(mhu) we may want to include monitoring objects' status in readiness computation diff --git a/controllers/nodepool.go b/controllers/nodepool.go index 1d8b9c83..6ab0b625 100644 --- a/controllers/nodepool.go +++ b/controllers/nodepool.go @@ -603,8 +603,10 @@ func (r *SFController) DeployNodepoolBuilder(statsdExporterVolume apiv1.Volume, pvcReadiness := r.reconcileExpandPVC(builderIdent+"-"+builderIdent+"-0", r.cr.Spec.Nodepool.Builder.Storage) - routeReady := r.ensureHTTPSRoute(r.cr.Name+"-nodepool-builder", "nodepool", builderIdent, "/builds", - buildLogsHttpdPort, map[string]string{}, r.cr.Spec.FQDN, r.cr.Spec.LetsEncrypt) + routeReady := r.ensureHTTPSRoute(r.cr.Name+"-nodepool-builder", r.cr.Spec.FQDN, builderIdent, "/nodepool/builds", + buildLogsHttpdPort, map[string]string{ + "haproxy.router.openshift.io/rewrite-target": "/builds/", + }, r.cr.Spec.LetsEncrypt) var isReady = r.IsStatefulSetReady(current) && routeReady && pvcReadiness @@ -733,8 +735,10 @@ func (r *SFController) DeployNodepoolLauncher(statsdExporterVolume apiv1.Volume, srv := base.MkService(launcherIdent, r.ns, launcherIdent, []int32{launcherPort}, launcherIdent) r.GetOrCreate(&srv) - routeReady := r.ensureHTTPSRoute(r.cr.Name+"-nodepool-launcher", "nodepool", launcherIdent, "/", - launcherPort, map[string]string{}, r.cr.Spec.FQDN, r.cr.Spec.LetsEncrypt) + routeReady := r.ensureHTTPSRoute(r.cr.Name+"-nodepool-launcher", r.cr.Spec.FQDN, launcherIdent, "/nodepool/api", + launcherPort, map[string]string{ + "haproxy.router.openshift.io/rewrite-target": "/", + }, r.cr.Spec.LetsEncrypt) isDeploymentReady := r.IsDeploymentReady(¤t) conds.UpdateConditions(&r.cr.Status.Conditions, launcherIdent, isDeploymentReady) diff --git a/controllers/softwarefactory_controller.go b/controllers/softwarefactory_controller.go index 6ed71245..5361f8d6 100644 --- a/controllers/softwarefactory_controller.go +++ b/controllers/softwarefactory_controller.go @@ -30,6 +30,7 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" + apiroutev1 "github.com/openshift/api/route/v1" sfv1 "github.com/softwarefactory-project/sf-operator/api/v1" "github.com/softwarefactory-project/sf-operator/controllers/libs/conds" sfmonitoring "github.com/softwarefactory-project/sf-operator/controllers/libs/monitoring" @@ -147,6 +148,14 @@ func (r *SFController) cleanup() { if r.GetM("zuul-monitor", ¤tZPM) { r.DeleteR(¤tZPM) } + + // remove a legacy Route definition for Zuul + r.DeleteR(&apiroutev1.Route{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: r.ns, + Name: r.cr.Spec.FQDN + "-zuul-red", + }, + }) } func (r *SFController) Step() sfv1.SoftwareFactoryStatus { @@ -383,11 +392,7 @@ func (r *SoftwareFactoryReconciler) SetupWithManager(mgr ctrl.Manager) error { switch updatedResourceName := a.GetName(); updatedResourceName { case NodepoolProvidersSecretsName: return req - case GetCustomRouteSSLSecretName("logserver"): - return req - case GetCustomRouteSSLSecretName("nodepool"): - return req - case GetCustomRouteSSLSecretName("zuul"): + case CustomSSLSecretName: return req default: // Discover secrets for github and gitlab connections diff --git a/controllers/static/git-server/update-system-config.sh b/controllers/static/git-server/update-system-config.sh index 6a44d3c3..b468bcbc 100755 --- a/controllers/static/git-server/update-system-config.sh +++ b/controllers/static/git-server/update-system-config.sh @@ -45,7 +45,7 @@ cat << EOF > playbooks/base/pre.yaml - import_role: name: log-inventory vars: - zuul_log_url: "https://logserver.${FQDN}/" + zuul_log_url: "https://${FQDN}/logs" - hosts: all tasks: @@ -83,7 +83,7 @@ cat << EOF > playbooks/base/post.yaml name: buildset-artifacts-location vars: zuul_log_compress: true - zuul_log_url: "https://logserver.${FQDN}/" + zuul_log_url: "https://${FQDN}/logs" zuul_logserver_root: "{{ site_sflogs.path }}" zuul_log_verbose: true EOF diff --git a/controllers/utils.go b/controllers/utils.go 
index 936cf224..1b39ddc3 100644 --- a/controllers/utils.go +++ b/controllers/utils.go @@ -43,7 +43,10 @@ import ( sfv1 "github.com/softwarefactory-project/sf-operator/api/v1" ) -const BusyboxImage = "quay.io/software-factory/sf-op-busybox:1.5-3" +const ( + BusyboxImage = "quay.io/software-factory/sf-op-busybox:1.5-3" + CustomSSLSecretName = "sf-ssl-cert" +) // HTTPDImage uses pinned/ubi8 based image for httpd // https://catalog.redhat.com/software/containers/ubi8/httpd-24/6065b844aee24f523c207943?q=httpd&architecture=amd64&image=651f274c8ce9242f7bb3e011 @@ -325,7 +328,7 @@ func (r *SFUtilContext) ensureRoute(route apiroutev1.Route, name string) bool { // the route setting changed. func (r *SFUtilContext) ensureHTTPSRoute( name string, host string, serviceName string, path string, - port int, annotations map[string]string, fqdn string, le *sfv1.LetsEncryptSpec) bool { + port int, annotations map[string]string, le *sfv1.LetsEncryptSpec) bool { tlsDataReady := true var sslCA, sslCrt, sslKey []byte @@ -333,11 +336,11 @@ func (r *SFUtilContext) ensureHTTPSRoute( if le == nil { // Letsencrypt config has not been set so we check the `customSSLSecretName` Secret // for any custom TLS data to setup the Route - sslCA, sslCrt, sslKey = r.extractStaticTLSFromSecret(name, host) + sslCA, sslCrt, sslKey = r.extractStaticTLSFromSecret() } else { // Letsencrypt config has been set so we ensure we set a Certificate via the // cert-manager Issuer and then we'll setup the Route based on the Certificate's Secret - tlsDataReady, sslCA, sslCrt, sslKey = r.extractTLSFromLECertificateSecret(name, host, fqdn, *le) + tlsDataReady, sslCA, sslCrt, sslKey = r.extractTLSFromLECertificateSecret(host, *le) } if !tlsDataReady { @@ -356,9 +359,9 @@ func (r *SFUtilContext) ensureHTTPSRoute( Key: string(sslKey), CACertificate: string(sslCA), } - route = base.MkHTTPSRoute(name, r.ns, host, serviceName, path, port, annotations, fqdn, &tls) + route = base.MkHTTPSRoute(name, r.ns, host, serviceName, path, 
port, annotations, &tls) } else { - route = base.MkHTTPSRoute(name, r.ns, host, serviceName, path, port, annotations, fqdn, nil) + route = base.MkHTTPSRoute(name, r.ns, host, serviceName, path, port, annotations, nil) } return r.ensureRoute(route, name) } @@ -494,15 +497,10 @@ func (r *SFUtilContext) DebugStatefulSet(name string) { r.log.V(1).Info("Debugging service", "name", name) } -func GetCustomRouteSSLSecretName(host string) string { - return host + "-ssl-cert" -} - -func (r *SFUtilContext) extractStaticTLSFromSecret(name string, host string) ([]byte, []byte, []byte) { +func (r *SFUtilContext) extractStaticTLSFromSecret() ([]byte, []byte, []byte) { var customSSLSecret apiv1.Secret - customSSLSecretName := GetCustomRouteSSLSecretName(host) - if !r.GetM(customSSLSecretName, &customSSLSecret) { + if !r.GetM(CustomSSLSecretName, &customSSLSecret) { return nil, nil, nil } else { // Fetching secret expected TLS Keys content @@ -510,23 +508,24 @@ func (r *SFUtilContext) extractStaticTLSFromSecret(name string, host string) ([] } } -func (r *SFUtilContext) extractTLSFromLECertificateSecret(name string, host string, fqdn string, le sfv1.LetsEncryptSpec) (bool, []byte, []byte, []byte) { +func (r *SFUtilContext) extractTLSFromLECertificateSecret(host string, le sfv1.LetsEncryptSpec) (bool, []byte, []byte, []byte) { _, issuerName := getLetsEncryptServer(le) - dnsNames := []string{host + "." 
+ fqdn} - certificate := cert.MkCertificate(name, r.ns, issuerName, dnsNames, name+"-tls", nil) + const sfLECertName = "sf-le-certificate" + dnsNames := []string{host} + certificate := cert.MkCertificate(sfLECertName, r.ns, issuerName, dnsNames, sfLECertName+"-tls", nil) current := certv1.Certificate{} - found := r.GetM(name, ¤t) + found := r.GetM(sfLECertName, ¤t) if !found { - r.log.V(1).Info("Creating Cert-Manager LetsEncrypt Certificate ...", "name", name) + r.log.V(1).Info("Creating Cert-Manager LetsEncrypt Certificate ...", "name", sfLECertName) r.CreateR(&certificate) return false, nil, nil, nil } else { if current.Spec.IssuerRef.Name != certificate.Spec.IssuerRef.Name || !reflect.DeepEqual(current.Spec.DNSNames, certificate.Spec.DNSNames) { // We need to update the Certficate - r.log.V(1).Info("Updating Cert-Manager LetsEncrypt Certificate ...", "name", name) + r.log.V(1).Info("Updating Cert-Manager LetsEncrypt Certificate ...", "name", sfLECertName) current.Spec = *certificate.Spec.DeepCopy() r.UpdateR(¤t) return false, nil, nil, nil @@ -536,7 +535,7 @@ func (r *SFUtilContext) extractTLSFromLECertificateSecret(name string, host stri ready := cert.IsCertificateReady(¤t) if ready { - r.log.V(1).Info("Cert-Manager LetsEncrypt Certificate is Ready ...", "name", name) + r.log.V(1).Info("Cert-Manager LetsEncrypt Certificate is Ready ...", "name", sfLECertName) var leSSLSecret apiv1.Secret if r.GetM(current.Spec.SecretName, &leSSLSecret) { // Extract the TLS material @@ -545,12 +544,12 @@ func (r *SFUtilContext) extractTLSFromLECertificateSecret(name string, host stri } else { // We are not able to find the Certificate's secret r.log.V(1).Info("Cert-Manager LetsEncrypt Certificate is Ready but waiting for the Secret ...", - "name", name, "secret", current.Spec.SecretName) + "name", sfLECertName, "secret", current.Spec.SecretName) return false, nil, nil, nil } } else { // Return false to force a new Reconcile as the certificate is not Ready yet - 
r.log.V(1).Info("Cert-Manager LetsEncrypt Certificate is not Ready yet ...", "name", name) + r.log.V(1).Info("Cert-Manager LetsEncrypt Certificate is not Ready yet ...", "name", sfLECertName) return false, nil, nil, nil } } diff --git a/controllers/zuul.go b/controllers/zuul.go index ac5698cd..e32a273d 100644 --- a/controllers/zuul.go +++ b/controllers/zuul.go @@ -969,7 +969,7 @@ func (r *SFController) DeployZuul() bool { cfgINI.Section(srv).NewKey("prometheus_port", strconv.Itoa(zuulPrometheusPort)) } // Set Zuul web public URL - cfgINI.Section("web").NewKey("root", "https://zuul."+r.cr.Spec.FQDN) + cfgINI.Section("web").NewKey("root", "https://"+r.cr.Spec.FQDN+"/zuul/") // Set Zuul Merger Configurations if r.cr.Spec.Zuul.Merger.GitUserName != "" { @@ -1031,15 +1031,10 @@ func (r *SFController) runZuulInternalTenantReconfigure() bool { } func (r *SFController) setupZuulIngress() bool { - route1Ready := r.ensureHTTPSRoute(r.cr.Name+"-zuul", "zuul", "zuul-web", "/", zuulWEBPort, - map[string]string{}, r.cr.Spec.FQDN, r.cr.Spec.LetsEncrypt) - - // Zuul ingress is special because the zuul-web container expect the - // the files to be served at `/zuul/`, but it is listening on `/`. - // Thus this ingress remove the `/zuul/` so that the javascript loads as - // expected - route2Ready := r.ensureHTTPSRoute(r.cr.Name+"-zuul-red", "zuul", "zuul-web", "/zuul", zuulWEBPort, map[string]string{ - "haproxy.router.openshift.io/rewrite-target": "/", - }, r.cr.Spec.FQDN, r.cr.Spec.LetsEncrypt) - return route1Ready && route2Ready + route1Ready := r.ensureHTTPSRoute(r.cr.Name+"-zuul", r.cr.Spec.FQDN, "zuul-web", "/zuul", zuulWEBPort, + map[string]string{ + "haproxy.router.openshift.io/rewrite-target": "/", + }, r.cr.Spec.LetsEncrypt) + + return route1Ready } diff --git a/doc/deployment/certificates.md b/doc/deployment/certificates.md index 63114704..361a4804 100644 --- a/doc/deployment/certificates.md +++ b/doc/deployment/certificates.md @@ -2,55 +2,45 @@ ## Table of Contents -1. 
[Using existing X509 certificates](#using-x509-certificates) +1. [Using a trusted Certificate Authority](#using-a-trusted-certificate-authority) 1. [Using Let's Encrypt](#using-lets-encrypt) -By default, a SF deployment comes with a self-signed Certificate Authority delivered by the [cert-manager operator](https://cert-manager.io/), and HTTP services in the deployment get certificates from this CA to enable secure ingress with TLS. The cert-manager operator also **simplifies certificates lifecycle managagement by handling renewals and routes reconfigurations automatically**. +By default, a SF deployment comes with a self-signed Certificate Authority delivered by the Ingress manager, and HTTPS services in the deployment get certificates from this CA to enable secure ingress with TLS. -Currently, the list of concerned HTTP services is: +Currently, the list of concerned HTTPS services is: - logserver HTTP endpoint -- nodepool web API +- nodepool web API and nodepool build's logs - zuul-web -While this is good enough for testing, the cert-manager operator also allows you to integrate your deployment with an existing, trusted Certificate Authority, or even use [Let's Encrypt](https://letsencrypt.org/). +While this is good enough for testing, the sf-operator allows you to integrate your deployment with an existing, trusted Certificate Authority, or even use [Let's Encrypt](https://letsencrypt.org/). -## Using X509 certificates +## Using a trusted Certificate Authority -The operator watches specific `Secrets` in the `SoftwareFactory` Custom Resources namespace. -When those secrets' data hold a Certificate, Key and CA Certificate (following a specific scheme) then -the sf-operator is able to reconfigure the corresponding service `Route`'s TLS to use the TLS material -stored in the secret. +The operator watches a `Secret` named `sf-ssl-cert` in the `SoftwareFactory` Custom Resources namespace. 
+When this `Secret`'s data hold a Certificate, Key and CA Certificate (following a specific scheme) then +the sf-operator is able to reconfigure all managed `Route`'s TLS to use the TLS material stored in the secret. The `sfconfig` command can be used to configure these secrets. > The `create-service-ssl-secret` subcommand will validate the SSL certificate/key before updating the `Secret`. -The example below updates the `Secret` for the `logserver` service. The `SoftwareFactory` Custom -Resource will pass into a "non Ready" state until the `Route` is reconfigured. -Once `Ready`, the `Route` will present the new Certificate. +The `SoftwareFactory` Custom Resource will pass into a "non Ready" state until reconfiguration is completed. +Once `Ready`, all managed `Route` will present the new certificate. ```sh ./tools/sfconfig create-service-ssl-secret \ --sf-service-ca /tmp/ssl/localCA.pem \ --sf-service-key /tmp/ssl/ssl.key \ - --sf-service-cert /tmp/ssl/ssl.crt \ - --sf-service-name logserver + --sf-service-cert /tmp/ssl/ssl.crt ``` -Allowed `sf-service-name` values are: - - - logserver - - zuul - - nodepool - - gerrit (if deployed with the CLI) - ## Using Let's Encrypt -The SF Operator offers an option to request Certificates from `Let's Encrypt` using the `ACME http01` -challenge. All DNS names exposed by the `Routes` must be publicly resolvable. +The SF Operator offers an option to request a certificate from `Let's Encrypt` using the `ACME http01` +challenge. The deployment `FQDN` must be publicly resolvable. -> This overrides any custom X509 certificates that might have been set following the steps above. +> This overrides the custom X509 certificates that might have been set following the [steps above](#using-a-trusted-certificate-authority). 1. test your deployment with Let's Encrypt's staging server: @@ -77,4 +67,5 @@ spec: [...] ``` -Once the `SoftwareFactory` Custom Resource is ready, your services are using certificates issued by Let's Encrypt. 
\ No newline at end of file +Once the `SoftwareFactory` Custom Resource is ready, all managed `Route` will present the new certificate +issued by Let's Encrypt. \ No newline at end of file diff --git a/doc/deployment/getting_started.md b/doc/deployment/getting_started.md index 4e263482..4af3c2bc 100644 --- a/doc/deployment/getting_started.md +++ b/doc/deployment/getting_started.md @@ -58,19 +58,15 @@ NAME READY my-sf true ``` +The `sf-operator` handles the `Route`s installation. Here is the list of available +endpoints: -The following `Routes` (or `Ingresses`) are created: +- https://sfop.me/zuul +- https://sfop.me/logs +- https://sfop.me/nodepool/api/image-list +- https://sfop.me/nodepool/builds -``` -kubectl -n sf get routes -o custom-columns=HOST:.spec.host - -HOST -zuul.sfop.me -logserver.sfop.me -nodepool.sfop.me -``` - -At that point you have successfully deployed a **SoftwareFactory** instance. You can access the Zuul Web UI at https://zuul.sfop.me . +At that point you have successfully deployed a **SoftwareFactory** instance. You can access the Zuul Web UI at https://sfop.me/zuul. ## Next steps diff --git a/doc/deployment/nodepool.md b/doc/deployment/nodepool.md index 0ab48bc2..0e5c6153 100644 --- a/doc/deployment/nodepool.md +++ b/doc/deployment/nodepool.md @@ -165,4 +165,4 @@ np0000000001 Nodepool exposes some [API endpoints](https://zuul-ci.org/docs/nodepool/latest/operation.html#web-interface). -For instance, to reach the `image-list` endpoint a user can access the following URL: `https://nodepool.<FQDN>/image-list`. +For instance, to reach the `image-list` endpoint a user can access the following URL: `https://<FQDN>/nodepool/api/image-list`.
diff --git a/doc/developer/getting_started.md b/doc/developer/getting_started.md index 7d5886e1..4f903796 100644 --- a/doc/developer/getting_started.md +++ b/doc/developer/getting_started.md @@ -118,11 +118,12 @@ Each change to the `CR`, passed as parameter, will require a new run of the comm You can verify that the services are properly exposed with Firefox (you may have to accept insecure connections when deploying with the default self-signed CA): ```sh -firefox https://zuul.<FQDN> +firefox https://<FQDN>/zuul +firefox https://<FQDN>/logs +firefox https://<FQDN>/nodepool/api/image-list +firefox https://<FQDN>/nodepool/builds firefox https://gerrit.<FQDN> -firefox https://logserver.<FQDN> firefox https://prometheus.<FQDN> -firefox https://nodepool.<FQDN> ``` ## Delete the development deployment diff --git a/doc/reference/api/index.md b/doc/reference/api/index.md index a4665c21..046d759e 100644 --- a/doc/reference/api/index.md +++ b/doc/reference/api/index.md @@ -245,7 +245,7 @@ _Appears in:_ | Field | Description | Default Value | | --- | --- | --- | -| `fqdn` _string_ | The fully qualified domain name to use with the log server. Logs will be served at https://logserver.`FQDN` | -| +| `fqdn` _string_ | The fully qualified domain name to use with the log server. Logs will be served at https://`FQDN`/logs | -| | `LetsEncrypt` _[LetsEncryptSpec](#letsencryptspec)_ | LetsEncrypt settings for enabling using LetsEncrypt for Routes/TLS | -| | `storageClassName` _string_ | Default storage class to use with Persistent Volume Claims issued by this resource. Consult your cluster's configuration to see what storage classes are available and recommended for your use case.
| -| | `authorizedSSHKey` _string_ | The SSH public key, encoded as base64, to use to authorize file transfers on the log server | -| diff --git a/doc/reference/cli/index.md b/doc/reference/cli/index.md index 0a58c96c..3c5b5ff1 100644 --- a/doc/reference/cli/index.md +++ b/doc/reference/cli/index.md @@ -108,7 +108,6 @@ Flags: |--sf-service-ca |string | Path for the service CA certificate| - | |--sf-service-cert |string | Path for the service certificate file| - | |--sf-service-key |string | Path for the service private key file| - | -|--sf-service-name s|tring | The SF service name for the SSL certificate like Zuul, Gerrit, Logserver etc.| - | See [this section in the deployment documentation](./../deployment/certificates.md#using-x509-certificates) for more details. diff --git a/doc/user/nodepool_config_repository.md b/doc/user/nodepool_config_repository.md index 66d7d35d..ce0f5e5e 100644 --- a/doc/user/nodepool_config_repository.md +++ b/doc/user/nodepool_config_repository.md @@ -173,6 +173,6 @@ Once these three files `nodepool/dib-ansible/inventory.yaml`, `nodepool/dib-ansi > At the first connection attempt of the `nodepool-builder` to an `image-builder` host, Ansible will refuse to connect because the SSH Host key is not known. Please refer to the section [Accept an image-builder's SSH Host key](../deployment/nodepool#accept-an-image-builders-ssh-host-key). -The image builds status can be consulted by accessing this endpoint: `https://nodepool.<FQDN>/dib-image-list`. +The image builds status can be consulted by accessing this endpoint: `https://<FQDN>/nodepool/api/dib-image-list`. -The image builds logs can be consulted by accessing this endpoint: `https://nodepool.<FQDN>/builds`. 
\ No newline at end of file diff --git a/playbooks/group_vars/all.yaml b/playbooks/group_vars/all.yaml index 06b345ed..8924603b 100644 --- a/playbooks/group_vars/all.yaml +++ b/playbooks/group_vars/all.yaml @@ -4,20 +4,18 @@ fqdn: sfop.me validate_certs: false -gerrit_host: "gerrit.{{ fqdn }}" -zuul_host: "zuul.{{ fqdn }}" -logserver_host: "logserver.{{ fqdn }}" -nodepool_host: "nodepool.{{ fqdn }}" +zuul_endpoint: "{{ fqdn }}/zuul" +logserver_endpoint: "{{ fqdn }}/logs" +nodepool_endpoint: "{{ fqdn }}/nodepool" prometheus_host: "prometheus.{{ fqdn }}" +gerrit_host: "gerrit.{{ fqdn }}" hosts: + - "{{ fqdn }}" - "{{ gerrit_host }}" - - "{{ zuul_host }}" - - "{{ logserver_host }}" - - "{{ nodepool_host }}" - "{{ prometheus_host }}" logserver_copy_content_dest: "/tmp/logserver-content" zuul_api_retries: 60 -zuul_api_delay: 10 \ No newline at end of file +zuul_api_delay: 10 diff --git a/roles/health-check/check-service-uri/tasks/main.yaml b/roles/health-check/check-service-uri/tasks/main.yaml index d3e356f5..898564fc 100644 --- a/roles/health-check/check-service-uri/tasks/main.yaml +++ b/roles/health-check/check-service-uri/tasks/main.yaml @@ -17,7 +17,7 @@ - name: Attempt to access Zuul info via API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/info" + url: "https://{{ zuul_endpoint }}/api/info" method: GET return_content: true validate_certs: "{{ validate_certs }}" @@ -31,7 +31,7 @@ - name: Attempt to access Zuul pipelines for internal tenant via API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/internal/pipelines" + url: "https://{{ zuul_endpoint }}/api/tenant/internal/pipelines" method: GET return_content: true validate_certs: "{{ validate_certs }}" @@ -45,7 +45,7 @@ - name: Attempt to access Zuul web status page ansible.builtin.uri: - url: "https://{{ zuul_host }}/status" + url: "https://{{ zuul_endpoint }}/status" method: GET validate_certs: "{{ validate_certs }}" status_code: [200, 503] @@ -57,7 +57,7 @@ - name: Attempt to access 
Zuul web internal tenant status page ansible.builtin.uri: - url: "https://{{ zuul_host }}/internal/status" + url: "https://{{ zuul_endpoint }}/internal/status" method: GET validate_certs: "{{ validate_certs }}" status_code: [200, 503] @@ -69,7 +69,7 @@ - name: Attempt to access Logserver web ansible.builtin.uri: - url: "https://{{ logserver_host }}/" + url: "https://{{ logserver_endpoint }}/" method: GET validate_certs: "{{ validate_certs }}" status_code: [200, 503] @@ -80,7 +80,7 @@ - name: Attempt to access Nodepool launcher API ansible.builtin.uri: - url: "https://{{ nodepool_host }}/ready" + url: "https://{{ nodepool_endpoint }}/api/ready" method: GET return_content: true validate_certs: "{{ validate_certs }}" @@ -93,7 +93,7 @@ - name: Attempt to access Nodepool builder build logs ansible.builtin.uri: - url: "https://{{ nodepool_host }}/builds" + url: "https://{{ nodepool_endpoint }}/builds" method: GET return_content: true validate_certs: "{{ validate_certs }}" diff --git a/roles/health-check/config-repo-submit-change/tasks/ensure-config-check-failure.yaml b/roles/health-check/config-repo-submit-change/tasks/ensure-config-check-failure.yaml index 1b080f52..7222c2af 100644 --- a/roles/health-check/config-repo-submit-change/tasks/ensure-config-check-failure.yaml +++ b/roles/health-check/config-repo-submit-change/tasks/ensure-config-check-failure.yaml @@ -11,7 +11,7 @@ - name: Wait config-check FAILURE result using zuul-web API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/{{ zuul_tenant | default('internal') }}/builds?job_name=config-check&change={{ json_reply._number }}" + url: "https://{{ zuul_endpoint }}/api/tenant/{{ zuul_tenant | default('internal') }}/builds?job_name=config-check&change={{ json_reply._number }}" return_content: true body_format: json validate_certs: "{{ validate_certs }}" diff --git a/roles/health-check/config-repo-submit-change/tasks/ensure-config-update-success.yaml 
b/roles/health-check/config-repo-submit-change/tasks/ensure-config-update-success.yaml index 30dd2f97..f279f350 100644 --- a/roles/health-check/config-repo-submit-change/tasks/ensure-config-update-success.yaml +++ b/roles/health-check/config-repo-submit-change/tasks/ensure-config-update-success.yaml @@ -7,7 +7,7 @@ - name: Wait config-update SUCCESS result using zuul-web API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/{{ zuul_tenant | default('internal') }}/builds?job_name=config-update&newrev={{ commitsha.stdout }}" + url: "https://{{ zuul_endpoint }}/api/tenant/{{ zuul_tenant | default('internal') }}/builds?job_name=config-update&newrev={{ commitsha.stdout }}" return_content: true body_format: json validate_certs: false diff --git a/roles/health-check/config-repo-submit-change/tasks/ensure-zuul-console-success.yaml b/roles/health-check/config-repo-submit-change/tasks/ensure-zuul-console-success.yaml index 542326b2..60163803 100644 --- a/roles/health-check/config-repo-submit-change/tasks/ensure-zuul-console-success.yaml +++ b/roles/health-check/config-repo-submit-change/tasks/ensure-zuul-console-success.yaml @@ -2,7 +2,7 @@ # FROM: https://opendev.org/zuul/zuul-operator/src/branch/master/playbooks/zuul-operator-functional/test.yaml#L157-L187 - name: Wait for config-update post job ansible.builtin.shell: | - curl -sk https://{{ zuul_host }}/api/tenant/internal/status | jq -r '.pipelines[].change_queues[].heads[][].jobs[]' | jq -rc 'select(.name == "config-update")' | jq -r '.uuid' + curl -sk https://{{ zuul_endpoint }}/api/tenant/internal/status | jq -r '.pipelines[].change_queues[].heads[][].jobs[]' | jq -rc 'select(.name == "config-update")' | jq -r '.uuid' register: _job_uuid until: _job_uuid.stdout != "" and "null" not in _job_uuid.stdout retries: "{{ zuul_api_retries }}" @@ -12,7 +12,7 @@ - name: Connect and validate console stream ansible.builtin.shell: | - (sleep 5; echo "") | wsdump --nocert -r -t '{"uuid":"{{ _job_uuid.stdout_lines[0] 
}}","logfile":"console.log"}' wss://{{ zuul_host }}/api/tenant/internal/console-stream + (sleep 5; echo "") | wsdump --nocert -r -t '{"uuid":"{{ _job_uuid.stdout_lines[0] }}","logfile":"console.log"}' wss://{{ zuul_endpoint }}/api/tenant/internal/console-stream register: console_stream until: "'Job console starting...' in console_stream.stdout" retries: "{{ zuul_api_retries }}" diff --git a/roles/health-check/config-update-nodepool-launcher/tasks/main.yaml b/roles/health-check/config-update-nodepool-launcher/tasks/main.yaml index 3e8ef323..1ad3610d 100644 --- a/roles/health-check/config-update-nodepool-launcher/tasks/main.yaml +++ b/roles/health-check/config-update-nodepool-launcher/tasks/main.yaml @@ -134,7 +134,7 @@ - name: Ensure the new label listed in the Nodepool API ansible.builtin.uri: - url: "https://{{ nodepool_host }}/label-list" + url: "https://{{ nodepool_endpoint }}/api/label-list" method: GET validate_certs: "{{ validate_certs }}" status_code: [200, 503] @@ -148,7 +148,7 @@ - name: Ensure the new label listed in the Zuul API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/internal/labels" + url: "https://{{ zuul_endpoint }}/api/tenant/internal/labels" method: GET validate_certs: "{{ validate_certs }}" status_code: [200, 503] @@ -158,4 +158,4 @@ - this.status == 200 - zuul_worker_ci_label in this.content retries: "{{ zuul_api_retries }}" - delay: "{{ zuul_api_delay }}" \ No newline at end of file + delay: "{{ zuul_api_delay }}" diff --git a/roles/health-check/config-update-zuul/defaults/main.yaml b/roles/health-check/config-update-zuul/defaults/main.yaml index 38593c43..93ca673d 100644 --- a/roles/health-check/config-update-zuul/defaults/main.yaml +++ b/roles/health-check/config-update-zuul/defaults/main.yaml @@ -1,6 +1,6 @@ --- fqdn: sfop.me random: dummy -zuul_host: zuul.{{ fqdn }} +zuul_endpoint: "{{ fqdn }}/zuul" zuul_api_delay: 10 -zuul_api_retries: 60 \ No newline at end of file +zuul_api_retries: 60 diff --git 
a/roles/health-check/config-update-zuul/tasks/main.yaml b/roles/health-check/config-update-zuul/tasks/main.yaml index 7cc49cb1..430d5a87 100644 --- a/roles/health-check/config-update-zuul/tasks/main.yaml +++ b/roles/health-check/config-update-zuul/tasks/main.yaml @@ -2,7 +2,7 @@ # First let's ensure that the internal tenant does not contain any Zuul configuration error - name: Get configuration errors/warnings into the internal tenant ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/internal/config-errors" + url: "https://{{ zuul_endpoint }}/api/tenant/internal/config-errors" validate_certs: "{{ validate_certs }}" method: GET return_content: true @@ -92,7 +92,7 @@ - name: Get Zuul projects list ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/{{ tenant_name }}/projects" + url: "https://{{ zuul_endpoint }}/api/tenant/{{ tenant_name }}/projects" validate_certs: "{{ validate_certs }}" method: GET register: zuul_tenant_projects diff --git a/roles/health-check/pod-spawning/tasks/main.yaml b/roles/health-check/pod-spawning/tasks/main.yaml index 1b6ea892..ab38195f 100644 --- a/roles/health-check/pod-spawning/tasks/main.yaml +++ b/roles/health-check/pod-spawning/tasks/main.yaml @@ -115,7 +115,7 @@ - name: Wait for microshift-pod job ansible.builtin.shell: | - curl -sk https://{{ zuul_host }}/api/tenant/internal/status | jq -r '.pipelines[].change_queues[].heads[][].jobs[]' | jq -rc 'select(.name == "microshift-pod")' | jq -r '.uuid' + curl -sk https://{{ zuul_endpoint }}/api/tenant/internal/status | jq -r '.pipelines[].change_queues[].heads[][].jobs[]' | jq -rc 'select(.name == "microshift-pod")' | jq -r '.uuid' register: _job_uuid until: _job_uuid.stdout != "" and "null" not in _job_uuid.stdout retries: "{{ zuul_api_retries }}" @@ -125,7 +125,7 @@ - name: Wait for last result for microshift-pod using zuul-web API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/{{ zuul_tenant | default('internal') 
}}/builds?job_name=microshift-pod" + url: "https://{{ zuul_endpoint }}/api/tenant/{{ zuul_tenant | default('internal') }}/builds?job_name=microshift-pod" return_content: true body_format: json validate_certs: false diff --git a/roles/health-check/test-cert-manager-letsencrypt/defaults/main.yaml b/roles/health-check/test-cert-manager-letsencrypt/defaults/main.yaml new file mode 100644 index 00000000..31eb8460 --- /dev/null +++ b/roles/health-check/test-cert-manager-letsencrypt/defaults/main.yaml @@ -0,0 +1,2 @@ +--- +fqdn: sfop.me diff --git a/roles/health-check/test-cert-manager-letsencrypt/tasks/main.yaml b/roles/health-check/test-cert-manager-letsencrypt/tasks/main.yaml index 78a771f6..51d06af2 100644 --- a/roles/health-check/test-cert-manager-letsencrypt/tasks/main.yaml +++ b/roles/health-check/test-cert-manager-letsencrypt/tasks/main.yaml @@ -11,31 +11,12 @@ # We do not wait for the CR status to be ready because it will never happen as the # CI deployment Route URL/Host cannot be resolved and thus the http01 challenge will # fails. -# However here we are able to check that 'Certificate resources' are created for each -# related 'Route' and we can assert that each 'Certificate.Status' is not ready due to -# http01 challenge failure. +# However here we are able to check that a 'Certificate' resource's challenge is created but +# does not become ready due to http01 challenge failure. # This partialy verifies the flow with Let's Encrypt.
- name: Ensure nodepool service Certificate not Ready for expected reason - shell: "kubectl -n sf get challenge -o json | grep nodepool" - register: result - until: - - result is success - - "'DNS problem: NXDOMAIN' in result.stdout or 'Waiting for HTTP-01 challenge propagation' in result.stdout or 'no such host' in result.stdout" - retries: 6 - delay: 10 - -- name: Ensure zuul service Certificate not Ready for expected reason - shell: "kubectl -n sf get challenge -o json | grep zuul" - register: result - until: - - result is success - - "'DNS problem: NXDOMAIN' in result.stdout or 'Waiting for HTTP-01 challenge propagation' in result.stdout or 'no such host' in result.stdout" - retries: 6 - delay: 10 - -- name: Ensure logserver service Certificate not Ready for expected reason - shell: "kubectl -n sf get challenge -o json | grep logserver" + shell: "kubectl -n sf get challenge -o json | grep {{ fqdn }}" register: result until: - result is success diff --git a/roles/health-check/test-custom-route-certs/defaults/main.yaml b/roles/health-check/test-custom-route-certs/defaults/main.yaml index f2866c61..fa3dc635 100644 --- a/roles/health-check/test-custom-route-certs/defaults/main.yaml +++ b/roles/health-check/test-custom-route-certs/defaults/main.yaml @@ -6,7 +6,4 @@ state_or_province_name: Dolnoslaskie locality_name: Wroclaw fqdn: sfop.me ca_common_name: "{{ fqdn }}" -common_name: "{{ service_name_custom_ssl }}.{{ ca_common_name }}" -common_name_alt: "{{ ca_common_name }}" - -service_name_custom_ssl: zuul +common_name: "{{ ca_common_name }}" diff --git a/roles/health-check/test-custom-route-certs/tasks/add_ssl_secret.yaml b/roles/health-check/test-custom-route-certs/tasks/add_ssl_secret.yaml index ff94e1d6..c47b43bf 100644 --- a/roles/health-check/test-custom-route-certs/tasks/add_ssl_secret.yaml +++ b/roles/health-check/test-custom-route-certs/tasks/add_ssl_secret.yaml @@ -6,6 +6,5 @@ --sf-service-ca {{ ssl_path }}/localCA.pem --sf-service-key {{ ssl_path }}/ssl.key 
--sf-service-cert {{ ssl_path }}/ssl.crt - --sf-service-name {{ service_name_custom_ssl }} args: chdir: "{{ zuul.project.src_dir | default(src_dir) }}" diff --git a/roles/health-check/test-custom-route-certs/tasks/check_route.yaml b/roles/health-check/test-custom-route-certs/tasks/check_route.yaml index b0cd5e00..b8944415 100644 --- a/roles/health-check/test-custom-route-certs/tasks/check_route.yaml +++ b/roles/health-check/test-custom-route-certs/tasks/check_route.yaml @@ -5,7 +5,7 @@ - name: Verify that the SSL is in the secret ansible.builtin.shell: > - kubectl get secret {{ service_name_custom_ssl }}-ssl-cert -o json | + kubectl get secret sf-ssl-cert -o json | jq -r ".data.crt" | base64 -d register: _new_cert_secret @@ -14,13 +14,19 @@ that: - _ssl_content.stdout == _new_cert_secret.stdout -- name: Make a query to validate that the route exposes the expected certificate - ansible.builtin.shell: > - openssl s_client - -showcerts - -servername {{ common_name }} - -connect {{ common_name }}:443 +# From here we validate all Route handled by the sf-operator to ensure that curl +# is exposed to the right certificate + +- name: Make a query to validate that the Routes expose the expected certificate + ansible.builtin.shell: curl -kv https://{{ item }} register: _new_cert_subj - until: _ssl_content.stdout in _new_cert_subj.stdout - retries: 10 + loop: + - "{{ zuul_endpoint }}" + - "{{ nodepool_endpoint }}/builds" + - "{{ nodepool_endpoint }}/api" + - "{{ logserver_endpoint }}" + until: + - ca_common_name in _new_cert_subj.stderr or ca_common_name in _new_cert_subj.stdout + - random_state_or_province_name in _new_cert_subj.stderr or random_state_or_province_name in _new_cert_subj.stdout + retries: 5 delay: 3 diff --git a/roles/health-check/test-custom-route-certs/tasks/main.yaml b/roles/health-check/test-custom-route-certs/tasks/main.yaml index fd19f816..2028b612 100644 --- a/roles/health-check/test-custom-route-certs/tasks/main.yaml +++ 
b/roles/health-check/test-custom-route-certs/tasks/main.yaml @@ -7,6 +7,7 @@ - set_fact: ssl_path: "{{ tempdir.path }}" + random_state_or_province_name: "{{ lookup('community.general.random_string', special=false, length=8) }}" ### CA ### - name: Gen CA privkey @@ -20,7 +21,7 @@ country_name: "{{ country_name }}" organization_name: "{{ org_name }}" common_name: "{{ ca_common_name }}" - state_or_province_name: "{{ state_or_province_name }}" + state_or_province_name: "{{ random_state_or_province_name }}" locality_name: "{{ locality_name }}" organizational_unit_name: "{{ organizational_unit_name }}" basic_constraints: diff --git a/roles/health-check/zuul-authenticators/tasks/main.yaml b/roles/health-check/zuul-authenticators/tasks/main.yaml index 6d415b27..503650f4 100644 --- a/roles/health-check/zuul-authenticators/tasks/main.yaml +++ b/roles/health-check/zuul-authenticators/tasks/main.yaml @@ -17,7 +17,7 @@ - name: Wait for the new authenticator to appear in the Zuul API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/internal/info" + url: "https://{{ zuul_endpoint }}/api/tenant/internal/info" method: GET return_content: true validate_certs: "{{ validate_certs }}" @@ -36,7 +36,7 @@ - name: Wait for the test authenticator to be delisted ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/tenant/internal/info" + url: "https://{{ zuul_endpoint }}/api/tenant/internal/info" method: GET return_content: true validate_certs: "{{ validate_certs }}" diff --git a/roles/health-check/zuul-components/tasks/main.yaml b/roles/health-check/zuul-components/tasks/main.yaml index 858c4169..6bfb9eca 100644 --- a/roles/health-check/zuul-components/tasks/main.yaml +++ b/roles/health-check/zuul-components/tasks/main.yaml @@ -1,6 +1,6 @@ - name: Get Zuul Components ansible.builtin.uri: - url: https://{{ zuul_host }}/zuul/api/components + url: https://{{ zuul_endpoint }}/api/components status_code: [200] method: GET validate_certs: "{{ validate_certs }}" diff --git 
a/roles/health-check/zuul-connections/tasks/main.yaml b/roles/health-check/zuul-connections/tasks/main.yaml index c03f86bf..01a18b98 100644 --- a/roles/health-check/zuul-connections/tasks/main.yaml +++ b/roles/health-check/zuul-connections/tasks/main.yaml @@ -73,7 +73,7 @@ - name: Wait for the new Zuul connections to appear in the Zuul API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/connections" + url: "https://{{ zuul_endpoint }}/api/connections" method: GET return_content: true validate_certs: "{{ validate_certs }}" @@ -106,7 +106,7 @@ - name: Wait for the dummy Zuul connections to be removed from the API ansible.builtin.uri: - url: "https://{{ zuul_host }}/api/connections" + url: "https://{{ zuul_endpoint }}/api/connections" method: GET return_content: true validate_certs: "{{ validate_certs }}" diff --git a/tools/microshift/local-setup.yaml b/tools/microshift/local-setup.yaml index aa764de8..e5d35321 100644 --- a/tools/microshift/local-setup.yaml +++ b/tools/microshift/local-setup.yaml @@ -12,8 +12,5 @@ path: /etc/hosts block: | {{ microshift_ip }} {{ microshift_fqdn }} - {{ microshift_ip }} zuul.{{ sf_fqdn }} - {{ microshift_ip }} gerrit.{{ sf_fqdn }} - {{ microshift_ip }} logserver.{{ sf_fqdn }} - {{ microshift_ip }} nodepool.{{ sf_fqdn }} + {{ microshift_ip }} {{ sf_fqdn }} become: yes