diff --git a/Makefile b/Makefile index ce6aa53c..0fcd6ba7 100644 --- a/Makefile +++ b/Makefile @@ -396,6 +396,12 @@ kuttl-test-cleanup: if [ "$(KUTTL_SUITE)" == "ceilometer" ]; then \ oc delete --wait=true --all=true -n $(KUTTL_NAMESPACE) --timeout=120s Ceilometer; \ fi; \ + if [ "$(KUTTL_SUITE)" == "metric-storage" ]; then \ + oc delete --wait=true --all=true -n $(KUTTL_NAMESPACE) --timeout=120s MetricStorage; \ + fi; \ + if [ "$(KUTTL_SUITE)" == "cloudkitty" ]; then \ + oc delete --wait=true --all=true -n $(KUTTL_NAMESPACE) --timeout=120s CloudKitty; \ + fi; \ if [ "$(KUTTL_SUITE)" == "default" ]; then \ oc delete --wait=true --all=true -n $(KUTTL_NAMESPACE) --timeout=120s Telemetry; \ fi; \ diff --git a/PROJECT b/PROJECT index 5230867a..58516761 100644 --- a/PROJECT +++ b/PROJECT @@ -1,7 +1,3 @@ -# Code generated by tool. DO NOT EDIT. -# This file is used to track the info used to scaffold your project -# and allow the plugins properly work. -# More info: https://book.kubebuilder.io/reference/project-config.html domain: openstack.org layout: - go.kubebuilder.io/v3 @@ -69,4 +65,31 @@ resources: defaulting: true validation: true webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openstack.org + group: telemetry + kind: CloudKittyApi + path: github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1 + version: v1beta1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openstack.org + group: telemetry + kind: CloudKittyProc + path: github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1 + version: v1beta1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openstack.org + group: telemetry + kind: CloudKitty + path: github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1 + version: v1beta1 version: "3" diff --git a/api/bases/telemetry.openstack.org_autoscalings.yaml b/api/bases/telemetry.openstack.org_autoscalings.yaml index 7c7d682b..6b6d7c53 100644 --- a/api/bases/telemetry.openstack.org_autoscalings.yaml +++ b/api/bases/telemetry.openstack.org_autoscalings.yaml @@ -293,6 +293,11 @@ spec: description: CeilometerService - Selector to get the ceilometer service password from the Secret type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string type: object preserveJobs: default: false diff --git a/api/bases/telemetry.openstack.org_ceilometers.yaml b/api/bases/telemetry.openstack.org_ceilometers.yaml index 99fd948a..68c4de18 100644 --- a/api/bases/telemetry.openstack.org_ceilometers.yaml +++ b/api/bases/telemetry.openstack.org_ceilometers.yaml @@ -209,6 +209,11 @@ spec: description: CeilometerService - Selector to get the ceilometer service password from the Secret type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string type: object proxyImage: type: string diff --git a/api/bases/telemetry.openstack.org_cloudkitties.yaml b/api/bases/telemetry.openstack.org_cloudkitties.yaml new file mode 100644 index 00000000..5b5b615e --- /dev/null +++ b/api/bases/telemetry.openstack.org_cloudkitties.yaml @@ -0,0 +1,809 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: cloudkitties.telemetry.openstack.org +spec: + group: telemetry.openstack.org + 
names: + kind: CloudKitty + listKind: CloudKittyList + plural: cloudkitties + singular: cloudkitty + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CloudKitty is the Schema for the cloudkitties API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudKittySpec defines the desired state of CloudKitty + properties: + apiTimeout: + default: 60 + description: APITimeout for HAProxy, Apache, and rpc_response_timeout + type: integer + cloudKittyAPI: + description: CloudKittyAPI - Spec definition for the API service of + this CloudKitty deployment + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL (will + be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment + resource names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + override: + description: Override, provides the ability to override the generated + manifest of several child resources. + properties: + service: + additionalProperties: + description: |- + RoutedOverrideSpec - a routed service override configuration for the Service created to serve traffic + to the cluster. Allows for the manifest of the created Service to be overwritten with custom configuration. + properties: + endpointURL: + type: string + metadata: + description: |- + EmbeddedLabelsAnnotations is an embedded subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. + Only labels and annotations are included. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + type: object + spec: + description: |- + OverrideServiceSpec is a subset of the fields included in https://pkg.go.dev/k8s.io/api@v0.26.6/core/v1#ServiceSpec + Limited to Type, SessionAffinity, LoadBalancerSourceRanges, ExternalName, ExternalTrafficPolicy, SessionAffinityConfig, + IPFamilyPolicy, LoadBalancerClass and InternalTrafficPolicy + properties: + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. 
"internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the + configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + description: |- + Override configuration for the Service created to serve traffic to the cluster. 
+ The key must be the endpoint type (public, internal) + type: object + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tls: + description: TLS - Parameters related to the TLS + properties: + api: + description: API tls type which encapsulates for API services + properties: + internal: + description: Internal GenericService - holds the secret + for the internal endpoint + properties: + secretName: + description: SecretName - holding the cert, key for + the service + type: string + type: object + public: + description: Public GenericService - holds the secret + for the public endpoint + properties: + secretName: + description: SecretName - holding the cert, key for + the service + type: string + type: object + type: object + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in + a pre-created bundle file + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + cloudKittyProc: + description: CloudKittyProc - Spec definition for the Scheduler service + of this CloudKitty deployment + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL (will + be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment + resource names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tls: + description: TLS - Parameters related to the TLS + properties: + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in + a pre-created bundle file + type: string + secretName: + description: SecretName - holding the cert, key for the service + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config for all CloudKitty services using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + databaseAccount: + default: cloudkitty + description: DatabaseAccount - optional MariaDBAccount used for cloudkitty + DB, defaults to cloudkitty + type: string + databaseInstance: + default: openstack + description: |- + MariaDB instance name + Right now required by the maridb-operator to get the credentials from the instance to create the DB + Might not be required in future + type: string + lokiStackSize: + default: 1x.demo + description: Size of the LokiStack. 
Supported are "1x.demo" (default), + "1x.pico", "1x.extra-small", "1x.small", "1x.medium" + enum: + - "" + - 1x.demo + - 1x.pico + - 1x.extra-small + - 1x.small + - 1x.medium + type: string + memcachedInstance: + default: memcached + description: Memcached instance name. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting + NodeSelector here acts as a default value and can be overridden by service + specific NodeSelector Settings. + type: object + passwordSelector: + default: + cloudKittyService: CloudKittyPassword + description: PasswordsSelectors - Selectors to identify the ServiceUser + password from the Secret + properties: + aodhService: + default: AodhPassword + description: AodhService - Selector to get the aodh service password + from the Secret + type: string + ceilometerService: + default: CeilometerPassword + description: CeilometerService - Selector to get the ceilometer + service password from the Secret + type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string + type: object + period: + default: 300 + description: Period for collecting metrics in seconds + format: int32 + type: integer + preserveJobs: + default: false + description: PreserveJobs - do not delete jobs after they finished + e.g. to check logs + type: boolean + prometheusHost: + description: Host of user deployed prometheus + type: string + prometheusPort: + description: Port of user deployed prometheus + format: int32 + maximum: 65535 + minimum: 1 + type: integer + prometheusTLSCaCertSecret: + description: If defined, specifies which CA certificate to use for + user deployed prometheus + nullable: true + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + rabbitMqClusterName: + default: rabbitmq + description: |- + RabbitMQ instance name + Needed to request a transportURL that is created and used in CloudKitty + type: string + s3StorageConfig: + default: + secret: + name: cloudkitty-loki-s3 + type: s3 + description: S3 related configuration passed to Loki + properties: + schemas: + default: + - effectiveDate: "2020-10-11" + version: v11 + description: Schemas for reading and writing logs. + items: + properties: + effectiveDate: + description: |- + EffectiveDate contains a date in YYYY-MM-DD format which is interpreted in the UTC time zone. + + The configuration always needs at least one schema that is currently valid. This means that when creating a new + CloudKitty it is recommended to add a schema with the latest available version and an effective date of "yesterday". + New schema versions added to the configuration always needs to be placed "in the future", so that Loki can start + using it once the day rolls over. + type: string + version: + description: Version for writing and reading logs. 
+ type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + secret: + description: |- + Secret for object storage authentication. + Name of a secret in the same namespace as the CloudKitty custom resource. + properties: + credentialMode: + description: |- + CredentialMode can be used to set the desired credential mode for authenticating with the object storage. + If this is not set, then the operator tries to infer the credential mode from the provided secret and its + own configuration. + type: string + name: + description: Name of a secret in the namespace configured + for object storage secrets. + type: string + type: + description: Type of object storage that should be used + type: string + type: object + tls: + description: TLS configuration for reaching the object storage + endpoint. + properties: + caKey: + description: |- + Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the CloudKitty custom resource. + If empty, it defaults to "service-ca.crt". + type: string + caName: + description: |- + CA is the name of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the CloudKitty custom resource. + type: string + type: object + type: object + secret: + default: osp-secret + description: Secret containing OpenStack password information + type: string + serviceUser: + default: cloudkitty + description: ServiceUser - optional username used for this service + to register in cloudkitty + type: string + storageClass: + description: Storage class used for Loki + type: string + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + status: + description: CloudKittyStatus defines the observed state of CloudKitty + properties: + apiEndpoints: + additionalProperties: + additionalProperties: + type: string + type: object + description: API endpoints + type: object + cloudKittyAPIReadyCount: + default: 0 + description: ReadyCount of CloudKitty API instance + format: int32 + minimum: 0 + type: integer + cloudKittyProcReadyCounts: + default: 0 + description: ReadyCount of CloudKitty Processor instances + format: int32 + minimum: 0 + type: integer + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: |- + Severity provides a classification of Reason code, so the current situation is immediately + understandable and could act accordingly. 
+ It is meant for situations where Status=False and it should be indicated if it is just + informational, warning (next reconciliation might fix it) or an error (e.g. DB create issue + and no actions to automatically resolve the issue can/should be done). + For conditions where Status=Unknown or Status=True the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + databaseHostname: + description: CloudKitty Database Hostname + type: string + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + observedGeneration: + description: |- + ObservedGeneration - the most recent generation observed for this service. + If the observed generation is different than the spec generation, then the + controller has not started processing the latest changes, and the status + and its conditions are likely stale. + format: int64 + type: integer + prometheusHostname: + description: PrometheusHost - Hostname for prometheus used for autoscaling + type: string + prometheusPort: + description: PrometheusPort - Port for prometheus used for autoscaling + format: int32 + type: integer + prometheusTLS: + description: PrometheusTLS - Determines if TLS should be used for + accessing prometheus + type: boolean + serviceIDs: + additionalProperties: + type: string + description: ServiceIDs + type: object + transportURLSecret: + description: TransportURLSecret - Secret containing RabbitMQ transportURL + type: string + required: + - cloudKittyAPIReadyCount + - cloudKittyProcReadyCounts + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/api/bases/telemetry.openstack.org_cloudkittyapis.yaml b/api/bases/telemetry.openstack.org_cloudkittyapis.yaml new file mode 100644 index 00000000..9cf9619d --- /dev/null +++ b/api/bases/telemetry.openstack.org_cloudkittyapis.yaml @@ -0,0 +1,500 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: cloudkittyapis.telemetry.openstack.org +spec: + group: telemetry.openstack.org + names: + kind: CloudKittyAPI + listKind: CloudKittyAPIList + plural: cloudkittyapis + singular: cloudkittyapi + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CloudKittyAPI is the Schema for the cloudkittyapis API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudKittyAPISpec defines the desired state of CloudKittyAPI + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL (will + be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + databaseAccount: + default: cloudkitty + description: DatabaseAccount - optional MariaDBAccount used for cloudkitty + DB, defaults to cloudkitty + type: string + databaseHostname: + description: DatabaseHostname - CloudKitty Database Hostname + type: string + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment resource + names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + override: + description: Override, provides the ability to override the generated + manifest of several child resources. + properties: + service: + additionalProperties: + description: |- + RoutedOverrideSpec - a routed service override configuration for the Service created to serve traffic + to the cluster. Allows for the manifest of the created Service to be overwritten with custom configuration. + properties: + endpointURL: + type: string + metadata: + description: |- + EmbeddedLabelsAnnotations is an embedded subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. + Only labels and annotations are included. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + type: object + spec: + description: |- + OverrideServiceSpec is a subset of the fields included in https://pkg.go.dev/k8s.io/api@v0.26.6/core/v1#ServiceSpec + Limited to Type, SessionAffinity, LoadBalancerSourceRanges, ExternalName, ExternalTrafficPolicy, SessionAffinityConfig, + IPFamilyPolicy, LoadBalancerClass and InternalTrafficPolicy + properties: + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + description: |- + Override configuration for the Service created to serve traffic to the cluster. 
+ The key must be the endpoint type (public, internal) + type: object + type: object + passwordSelector: + default: + cloudKittyService: CloudKittyPassword + description: PasswordsSelectors - Selectors to identify the ServiceUser + password from the Secret + properties: + aodhService: + default: AodhPassword + description: AodhService - Selector to get the aodh service password + from the Secret + type: string + ceilometerService: + default: CeilometerPassword + description: CeilometerService - Selector to get the ceilometer + service password from the Secret + type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + secret: + default: osp-secret + description: Secret containing OpenStack password information + type: string + serviceAccount: + description: ServiceAccount - service account name used internally + to provide CloudKitty services the default SA name + type: string + serviceUser: + default: cloudkitty + description: ServiceUser - optional username used for this service + to register in cloudkitty + type: string + tls: + description: TLS - Parameters related to the TLS + properties: + api: + description: API tls type which encapsulates for API services + properties: + internal: + description: Internal GenericService - holds the secret for + the internal endpoint + properties: + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + public: + description: Public GenericService - holds the secret for + the public endpoint + properties: + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + type: object + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in a pre-created + bundle file + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + transportURLSecret: + description: Secret containing RabbitMq transport URL + type: string + type: object + status: + description: CloudKittyAPIStatus defines the observed state of CloudKittyAPI + properties: + apiEndpoints: + additionalProperties: + additionalProperties: + type: string + type: object + description: API endpoints + type: object + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: |- + Severity provides a classification of Reason code, so the current situation is immediately + understandable and could act accordingly. + It is meant for situations where Status=False and it should be indicated if it is just + informational, warning (next reconciliation might fix it) or an error (e.g. DB create issue + and no actions to automatically resolve the issue can/should be done). + For conditions where Status=Unknown or Status=True the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. 
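CloudKittyAPI looks like a sub-resource that the CloudKitty controller is expected to create itself (databaseHostname, transportURLSecret and serviceAccount are the kind of values a parent controller normally injects), so the sketch below is only meant to make the TLS and service-override fields concrete; every value other than the field names is an assumed placeholder.

    apiVersion: telemetry.openstack.org/v1beta1
    kind: CloudKittyAPI
    metadata:
      name: cloudkitty-api
    spec:
      replicas: 2
      secret: osp-secret
      databaseAccount: cloudkitty
      databaseHostname: openstack.openstack.svc              # normally filled in by the parent CR
      transportURLSecret: rabbitmq-transport-url-cloudkitty  # assumed secret name
      tls:
        caBundleSecretName: combined-ca-bundle               # assumed CA bundle secret
        api:
          internal:
            secretName: cert-cloudkitty-internal-svc         # assumed cert/key secret
          public:
            secretName: cert-cloudkitty-public-svc
      override:
        service:
          public:
            endpointURL: https://cloudkitty.apps.example.com # assumed public endpoint URL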
+ type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + lastAppliedTopology: + description: LastAppliedTopology - the last applied Topology + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + observedGeneration: + description: |- + ObservedGeneration - the most recent generation observed for this service. + If the observed generation is different than the spec generation, then the + controller has not started processing the latest changes, and the status + and its conditions are likely stale. + format: int64 + type: integer + readyCount: + default: 0 + description: ReadyCount of CloudKitty API instances + format: int32 + minimum: 0 + type: integer + serviceIDs: + additionalProperties: + type: string + description: ServiceIDs + type: object + required: + - readyCount + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/api/bases/telemetry.openstack.org_cloudkittyprocs.yaml b/api/bases/telemetry.openstack.org_cloudkittyprocs.yaml new file mode 100644 index 00000000..1caddbd5 --- /dev/null +++ b/api/bases/telemetry.openstack.org_cloudkittyprocs.yaml @@ -0,0 +1,325 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: cloudkittyprocs.telemetry.openstack.org +spec: + group: telemetry.openstack.org + names: + kind: CloudKittyProc + listKind: CloudKittyProcList + plural: cloudkittyprocs + singular: cloudkittyproc + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: NetworkAttachments + jsonPath: .status.networkAttachments + name: NetworkAttachments + type: string + - description: Status + jsonPath: .status.conditions[0].status + name: Status + type: string + - description: Message + jsonPath: .status.conditions[0].message + name: Message + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: CloudKittyProc is the Schema for the cloudkittprocs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudKittyProcSpec defines the desired state of CloudKitty + Processor + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL (will + be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + databaseAccount: + default: cloudkitty + description: DatabaseAccount - optional MariaDBAccount used for cloudkitty + DB, defaults to cloudkitty + type: string + databaseHostname: + description: DatabaseHostname - CloudKitty Database Hostname + type: string + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment resource + names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + passwordSelector: + default: + cloudKittyService: CloudKittyPassword + description: PasswordsSelectors - Selectors to identify the ServiceUser + password from the Secret + properties: + aodhService: + default: AodhPassword + description: AodhService - Selector to get the aodh service password + from the Secret + type: string + ceilometerService: + default: CeilometerPassword + description: CeilometerService - Selector to get the ceilometer + service password from the Secret + type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. 
+ If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + secret: + default: osp-secret + description: Secret containing OpenStack password information + type: string + serviceAccount: + description: ServiceAccount - service account name used internally + to provide CloudKitty services the default SA name + type: string + serviceUser: + default: cloudkitty + description: ServiceUser - optional username used for this service + to register in cloudkitty + type: string + tls: + description: TLS - Parameters related to the TLS + properties: + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in a pre-created + bundle file + type: string + secretName: + description: SecretName - holding the cert, key for the service + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + transportURLSecret: + description: Secret containing RabbitMq transport URL + type: string + type: object + status: + description: CloudKittyProcStatus defines the observed state of CloudKitty + Processor + properties: + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. 
+ type: string + severity: + description: |- + Severity provides a classification of Reason code, so the current situation is immediately + understandable and could act accordingly. + It is meant for situations where Status=False and it should be indicated if it is just + informational, warning (next reconciliation might fix it) or an error (e.g. DB create issue + and no actions to automatically resolve the issue can/should be done). + For conditions where Status=Unknown or Status=True the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + lastAppliedTopology: + description: LastAppliedTopology - the last applied Topology + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + observedGeneration: + description: |- + ObservedGeneration - the most recent generation observed for this service. + If the observed generation is different than the spec generation, then the + controller has not started processing the latest changes, and the status + and its conditions are likely stale. 
+ format: int64 + type: integer + readyCount: + default: 0 + description: ReadyCount of CloudKitty Processor instances + format: int32 + minimum: 0 + type: integer + required: + - readyCount + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/api/bases/telemetry.openstack.org_telemetries.yaml b/api/bases/telemetry.openstack.org_telemetries.yaml index 11cf9354..e48a8056 100644 --- a/api/bases/telemetry.openstack.org_telemetries.yaml +++ b/api/bases/telemetry.openstack.org_telemetries.yaml @@ -296,6 +296,11 @@ spec: description: CeilometerService - Selector to get the ceilometer service password from the Secret type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string type: object preserveJobs: default: false @@ -526,6 +531,11 @@ spec: description: CeilometerService - Selector to get the ceilometer service password from the Secret type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string type: object proxyImage: type: string @@ -585,6 +595,677 @@ spec: - secret - sgCoreImage type: object + cloudkitty: + description: CloudKitty - Parameters related to the cloudkitty service + properties: + apiTimeout: + default: 60 + description: APITimeout for HAProxy, Apache, and rpc_response_timeout + type: integer + cloudKittyAPI: + description: CloudKittyAPI - Spec definition for the API service + of this CloudKitty deployment + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL + (will be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment + resource names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + override: + description: Override, provides the ability to override the + generated manifest of several child resources. + properties: + service: + additionalProperties: + description: |- + RoutedOverrideSpec - a routed service override configuration for the Service created to serve traffic + to the cluster. Allows for the manifest of the created Service to be overwritten with custom configuration. + properties: + endpointURL: + type: string + metadata: + description: |- + EmbeddedLabelsAnnotations is an embedded subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. 
+ Only labels and annotations are included. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + type: object + spec: + description: |- + OverrideServiceSpec is a subset of the fields included in https://pkg.go.dev/k8s.io/api@v0.26.6/core/v1#ServiceSpec + Limited to Type, SessionAffinity, LoadBalancerSourceRanges, ExternalName, ExternalTrafficPolicy, SessionAffinityConfig, + IPFamilyPolicy, LoadBalancerClass and InternalTrafficPolicy + properties: + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). 
The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. 
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + description: |- + Override configuration for the Service created to serve traffic to the cluster. + The key must be the endpoint type (public, internal) + type: object + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tls: + description: TLS - Parameters related to the TLS + properties: + api: + description: API tls type which encapsulates for API services + properties: + internal: + description: Internal GenericService - holds the secret + for the internal endpoint + properties: + secretName: + description: SecretName - holding the cert, key + for the service + type: string + type: object + public: + description: Public GenericService - holds the secret + for the public endpoint + properties: + secretName: + description: SecretName - holding the cert, key + for the service + type: string + type: object + type: object + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs + in a pre-created bundle file + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + cloudKittyProc: + description: CloudKittyProc - Spec definition for the Scheduler + service of this CloudKitty deployment + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL + (will be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment + resource names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. 
+ items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tls: + description: TLS - Parameters related to the TLS + properties: + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs + in a pre-created bundle file + type: string + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config for all CloudKitty services using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + databaseAccount: + default: cloudkitty + description: DatabaseAccount - optional MariaDBAccount used for + cloudkitty DB, defaults to cloudkitty + type: string + databaseInstance: + default: openstack + description: |- + MariaDB instance name + Right now required by the maridb-operator to get the credentials from the instance to create the DB + Might not be required in future + type: string + enabled: + default: false + description: Enabled - Whether OpenStack CloudKitty service should + be deployed and managed + type: boolean + lokiStackSize: + default: 1x.demo + description: Size of the LokiStack. 
Supported are "1x.demo" (default), + "1x.pico", "1x.extra-small", "1x.small", "1x.medium" + enum: + - "" + - 1x.demo + - 1x.pico + - 1x.extra-small + - 1x.small + - 1x.medium + type: string + memcachedInstance: + default: memcached + description: Memcached instance name. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting + NodeSelector here acts as a default value and can be overridden by service + specific NodeSelector Settings. + type: object + passwordSelector: + default: + cloudKittyService: CloudKittyPassword + description: PasswordsSelectors - Selectors to identify the ServiceUser + password from the Secret + properties: + aodhService: + default: AodhPassword + description: AodhService - Selector to get the aodh service + password from the Secret + type: string + ceilometerService: + default: CeilometerPassword + description: CeilometerService - Selector to get the ceilometer + service password from the Secret + type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string + type: object + period: + default: 300 + description: Period for collecting metrics in seconds + format: int32 + type: integer + preserveJobs: + default: false + description: PreserveJobs - do not delete jobs after they finished + e.g. to check logs + type: boolean + prometheusHost: + description: Host of user deployed prometheus + type: string + prometheusPort: + description: Port of user deployed prometheus + format: int32 + maximum: 65535 + minimum: 1 + type: integer + prometheusTLSCaCertSecret: + description: If defined, specifies which CA certificate to use + for user deployed prometheus + nullable: true + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + rabbitMqClusterName: + default: rabbitmq + description: |- + RabbitMQ instance name + Needed to request a transportURL that is created and used in CloudKitty + type: string + s3StorageConfig: + default: + secret: + name: cloudkitty-loki-s3 + type: s3 + description: S3 related configuration passed to Loki + properties: + schemas: + default: + - effectiveDate: "2020-10-11" + version: v11 + description: Schemas for reading and writing logs. + items: + properties: + effectiveDate: + description: |- + EffectiveDate contains a date in YYYY-MM-DD format which is interpreted in the UTC time zone. + + The configuration always needs at least one schema that is currently valid. This means that when creating a new + CloudKitty it is recommended to add a schema with the latest available version and an effective date of "yesterday". + New schema versions added to the configuration always needs to be placed "in the future", so that Loki can start + using it once the day rolls over. + type: string + version: + description: Version for writing and reading logs. 
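Putting the cloudkitty fields above together, the cloudkitty block of a Telemetry custom resource could look roughly like the sketch below; the values mirror the defaults declared in this schema except the storage class, which is only a placeholder:

    cloudkitty:
      enabled: true
      databaseInstance: openstack
      rabbitMqClusterName: rabbitmq
      memcachedInstance: memcached
      secret: osp-secret
      lokiStackSize: 1x.demo
      storageClass: local-storage          # placeholder, environment specific
      s3StorageConfig:
        secret:
          name: cloudkitty-loki-s3
          type: s3
        schemas:
          - version: v11
            effectiveDate: "2020-10-11"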
+ type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + secret: + description: |- + Secret for object storage authentication. + Name of a secret in the same namespace as the CloudKitty custom resource. + properties: + credentialMode: + description: |- + CredentialMode can be used to set the desired credential mode for authenticating with the object storage. + If this is not set, then the operator tries to infer the credential mode from the provided secret and its + own configuration. + type: string + name: + description: Name of a secret in the namespace configured + for object storage secrets. + type: string + type: + description: Type of object storage that should be used + type: string + type: object + tls: + description: TLS configuration for reaching the object storage + endpoint. + properties: + caKey: + description: |- + Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the CloudKitty custom resource. + If empty, it defaults to "service-ca.crt". + type: string + caName: + description: |- + CA is the name of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the CloudKitty custom resource. + type: string + type: object + type: object + secret: + default: osp-secret + description: Secret containing OpenStack password information + type: string + serviceUser: + default: cloudkitty + description: ServiceUser - optional username used for this service + to register in cloudkitty + type: string + storageClass: + description: Storage class used for Loki + type: string + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object logging: description: Logging - Parameters related to the logging properties: diff --git a/api/v1beta1/autoscaling_types.go b/api/v1beta1/autoscaling_types.go index d6f3b99b..f541155e 100644 --- a/api/v1beta1/autoscaling_types.go +++ b/api/v1beta1/autoscaling_types.go @@ -17,13 +17,12 @@ limitations under the License. package v1beta1 import ( + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" - "github.com/openstack-k8s-operators/lib-common/modules/common/service" - "github.com/openstack-k8s-operators/lib-common/modules/common/util" "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -139,13 +138,6 @@ type AodhCore struct { TopologyRef *topologyv1.TopoRef `json:"topologyRef,omitempty"` } -// APIOverrideSpec to override the generated manifest of several child resources. -type APIOverrideSpec struct { - // Override configuration for the Service created to serve traffic to the cluster. 
- // The key must be the endpoint type (public, internal) - Service map[service.Endpoint]service.RoutedOverrideSpec `json:"service,omitempty"` -} - // AutoscalingSpec defines the desired state of Autoscaling type AutoscalingSpec struct { AutoscalingSpecBase `json:",inline"` diff --git a/api/v1beta1/cloudkitty_types.go b/api/v1beta1/cloudkitty_types.go new file mode 100644 index 00000000..717a7061 --- /dev/null +++ b/api/v1beta1/cloudkitty_types.go @@ -0,0 +1,399 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // CloudKittyUserID - Kolla's cloudkitty UID comes from the 'cloudkitty-user' in + // https://github.com/openstack/kolla/blob/master/kolla/common/users.py + CloudKittyUserID = 42408 + // CloudKittyGroupID - Kolla's cloudkitty GID + CloudKittyGroupID = 42408 + + // CloudKittyAPIContainerImage - default fall-back image for CloudKitty API + CloudKittyAPIContainerImage = "quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current" + // CloudKittyProcContainerImage - default fall-back image for CloudKitty Processor + CloudKittyProcContainerImage = "quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current" + // CloudKittyDbSyncHash hash + CKDbSyncHash = "ckdbsync" + // CKStorageInitHash hash + CKStorageInitHash = "ckstorageinit" + // CloudKittyReplicas - The number of replicas per each service deployed + CloudKittyReplicas = 1 +) + +// Reimplementation of loki-operator's Object storage related API. +// By doing this, we don't need to have a dependency on the loki-operator in +// the API module and it allows us to have all the fields optional due to +// worries about possible issues with upgrades when having required fields here + +type CASpec struct { + // Key is the data key of a ConfigMap containing a CA certificate. + // It needs to be in the same namespace as the CloudKitty custom resource. + // If empty, it defaults to "service-ca.crt". + // + // +kubebuilder:validation:optional + CAKey string `json:"caKey,omitempty"` + // CA is the name of a ConfigMap containing a CA certificate. + // It needs to be in the same namespace as the CloudKitty custom resource. + // + // +kubebuilder:validation:optional + CA string `json:"caName,omitempty"` +} + +type ObjectStorageSchema struct { + // Version for writing and reading logs. + // + // +kubebuilder:validation:Optional + Version string `json:"version"` + + // EffectiveDate contains a date in YYYY-MM-DD format which is interpreted in the UTC time zone. + // + // The configuration always needs at least one schema that is currently valid. 
This means that when creating a new + // CloudKitty it is recommended to add a schema with the latest available version and an effective date of "yesterday". + // New schema versions added to the configuration always needs to be placed "in the future", so that Loki can start + // using it once the day rolls over. + // + // +kubebuilder:validation:Optional + EffectiveDate string `json:"effectiveDate"` +} + +type ObjectStorageSecretSpec struct { + // Type of object storage that should be used + // + // +kubebuilder:validation:Optional + Type string `json:"type"` + + // Name of a secret in the namespace configured for object storage secrets. + // + // +kubebuilder:validation:Optional + Name string `json:"name"` + + // CredentialMode can be used to set the desired credential mode for authenticating with the object storage. + // If this is not set, then the operator tries to infer the credential mode from the provided secret and its + // own configuration. + // + // +kubebuilder:validation:Optional + CredentialMode string `json:"credentialMode,omitempty"` +} + +type ObjectStorageTLSSpec struct { + CASpec `json:",inline"` +} + +type ObjectStorageSpec struct { + // Schemas for reading and writing logs. + // + // +kubebuilder:validation:Optional + // +kubebuilder:validation:MinItems:=1 + // +kubebuilder:default:={{version:v11,effectiveDate:"2020-10-11"}} + // +listType=atomic + Schemas []ObjectStorageSchema `json:"schemas"` + + // Secret for object storage authentication. + // Name of a secret in the same namespace as the CloudKitty custom resource. + // + // +kubebuilder:validation:Optional + Secret ObjectStorageSecretSpec `json:"secret"` + + // TLS configuration for reaching the object storage endpoint. + // + // +kubebuilder:validation:Optional + TLS *ObjectStorageTLSSpec `json:"tls,omitempty"` +} + +type CloudKittySpecBase struct { + CloudKittyTemplate `json:",inline"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=openstack + // MariaDB instance name + // Right now required by the maridb-operator to get the credentials from the instance to create the DB + // Might not be required in future + DatabaseInstance string `json:"databaseInstance"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=rabbitmq + // RabbitMQ instance name + // Needed to request a transportURL that is created and used in CloudKitty + RabbitMqClusterName string `json:"rabbitMqClusterName"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=memcached + // Memcached instance name. + MemcachedInstance string `json:"memcachedInstance"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=false + // PreserveJobs - do not delete jobs after they finished e.g. to check logs + PreserveJobs bool `json:"preserveJobs"` + + // +kubebuilder:validation:Optional + // CustomServiceConfig - customize the service config for all CloudKitty services using this parameter to change service defaults, + // or overwrite rendered information using raw OpenStack config format. The content gets added to + // to /etc//.conf.d directory as a custom config file. + CustomServiceConfig string `json:"customServiceConfig,omitempty"` + + // +kubebuilder:validation:Optional + // NodeSelector to target subset of worker nodes running this service. Setting + // NodeSelector here acts as a default value and can be overridden by service + // specific NodeSelector Settings. 
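As a rough illustration of the CustomServiceConfig mechanism described above, a deployment could pass a raw OpenStack config snippet such as the following; the option shown is only an example, any valid oslo.config content works the same way:

    customServiceConfig: |
      [DEFAULT]
      debug = true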
+ NodeSelector *map[string]string `json:"nodeSelector,omitempty"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=60 + // APITimeout for HAProxy, Apache, and rpc_response_timeout + APITimeout int `json:"apiTimeout"` + + // +kubebuilder:validation:Optional + // TopologyRef to apply the Topology defined by the associated CR referenced + // by name + TopologyRef *topologyv1.TopoRef `json:"topologyRef,omitempty"` + + // Host of user deployed prometheus + // +kubebuilder:validation:Optional + PrometheusHost string `json:"prometheusHost,omitempty"` + + // Port of user deployed prometheus + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Optional + PrometheusPort int32 `json:"prometheusPort,omitempty"` + + // If defined, specifies which CA certificate to use for user deployed prometheus + // +kubebuilder:validation:Optional + // +nullable + PrometheusTLSCaCertSecret *corev1.SecretKeySelector `json:"prometheusTLSCaCertSecret,omitempty"` + + // Period for collecting metrics in seconds + // +kubebuilder:validation:Optional + // +kubebuilder:default=300 + Period int32 `json:"period"` + + // S3 related configuration passed to Loki + // +kubebuilder:validation:Optional + // +kubebuilder:default={secret: {name: "cloudkitty-loki-s3", type: "s3"}} + S3StorageConfig ObjectStorageSpec `json:"s3StorageConfig"` + + // Storage class used for Loki + // +kubebuilder:validation:Optional + StorageClass string `json:"storageClass,omitempty"` + + // Size of the LokiStack. Supported are "1x.demo" (default), "1x.pico", "1x.extra-small", "1x.small", "1x.medium" + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Enum="";"1x.demo";"1x.pico";"1x.extra-small";"1x.small";"1x.medium" + // +kubebuilder:default="1x.demo" + LokiStackSize string `json:"lokiStackSize"` +} + +// CloudKittySpecCore the same as CloudKittySpec without ContainerImage references +type CloudKittySpecCore struct { + CloudKittySpecBase `json:",inline"` + + // +kubebuilder:validation:Optional + // CloudKittyAPI - Spec definition for the API service of this CloudKitty deployment + CloudKittyAPI CloudKittyAPITemplateCore `json:"cloudKittyAPI"` + + // +kubebuilder:validation:Optional + // CloudKittyProc - Spec definition for the Scheduler service of this CloudKitty deployment + CloudKittyProc CloudKittyProcTemplateCore `json:"cloudKittyProc"` +} + +// CloudKittySpec defines the desired state of CloudKitty +type CloudKittySpec struct { + CloudKittySpecBase `json:",inline"` + + // +kubebuilder:validation:Optional + // CloudKittyAPI - Spec definition for the API service of this CloudKitty deployment + CloudKittyAPI CloudKittyAPITemplate `json:"cloudKittyAPI"` + + // +kubebuilder:validation:Optional + // CloudKittyProc - Spec definition for the Scheduler service of this CloudKitty deployment + CloudKittyProc CloudKittyProcTemplate `json:"cloudKittyProc"` +} + +// CloudKittyTemplate defines common input parameters used by all CloudKitty services +type CloudKittyTemplate struct { + // +kubebuilder:validation:Optional + // +kubebuilder:default=cloudkitty + // ServiceUser - optional username used for this service to register in cloudkitty + ServiceUser string `json:"serviceUser"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=cloudkitty + // DatabaseAccount - optional MariaDBAccount used for cloudkitty DB, defaults to cloudkitty + DatabaseAccount string `json:"databaseAccount"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=osp-secret + 
// Secret containing OpenStack password information + Secret string `json:"secret"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default={cloudKittyService: CloudKittyPassword} + // PasswordsSelectors - Selectors to identify the ServiceUser password from the Secret + PasswordSelectors PasswordsSelector `json:"passwordSelector"` +} + +// CloudKittyServiceTemplate defines the input parameters that can be defined for a given +// CloudKitty service +type CloudKittyServiceTemplate struct { + + // +kubebuilder:validation:Optional + // NodeSelector to target subset of worker nodes running this service. Setting here overrides + // any global NodeSelector settings within the CloudKitty CR. + NodeSelector *map[string]string `json:"nodeSelector,omitempty"` + + // +kubebuilder:validation:Optional + // CustomServiceConfig - customize the service config using this parameter to change service defaults, + // or overwrite rendered information using raw OpenStack config format. The content gets added to + // to /etc//.conf.d directory as a custom config file. + CustomServiceConfig string `json:"customServiceConfig,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=atomic + // CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + // that contain sensitive service config data. The content of each Secret gets added to the + // /etc//.conf.d directory as a custom config file. + CustomServiceConfigSecrets []string `json:"customServiceConfigSecrets,omitempty"` + + // +kubebuilder:validation:Optional + // Resources - Compute Resources required by this service (Limits/Requests). + // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // +kubebuilder:validation:Optional + // +listType=atomic + // NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network + NetworkAttachments []string `json:"networkAttachments,omitempty"` + + // +kubebuilder:validation:Optional + // TopologyRef to apply the Topology defined by the associated CR referenced + // by name + TopologyRef *topologyv1.TopoRef `json:"topologyRef,omitempty"` +} + +// CloudKittyStatus defines the observed state of CloudKitty +type CloudKittyStatus struct { + // Map of hashes to track e.g. job status + Hash map[string]string `json:"hash,omitempty"` + + // Conditions + Conditions condition.Conditions `json:"conditions,omitempty" optional:"true"` + + // CloudKitty Database Hostname + DatabaseHostname string `json:"databaseHostname,omitempty"` + + // TransportURLSecret - Secret containing RabbitMQ transportURL + TransportURLSecret string `json:"transportURLSecret,omitempty"` + + // API endpoints + APIEndpoints map[string]map[string]string `json:"apiEndpoints,omitempty"` + + // ServiceIDs + ServiceIDs map[string]string `json:"serviceIDs,omitempty"` + + // ReadyCount of CloudKitty API instance + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=0 + CloudKittyAPIReadyCount int32 `json:"cloudKittyAPIReadyCount"` + + // ReadyCount of CloudKitty Processor instances + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=0 + CloudKittyProcReadyCount int32 `json:"cloudKittyProcReadyCounts"` + + // ObservedGeneration - the most recent generation observed for this service. 
+ // If the observed generation is different than the spec generation, then the + // controller has not started processing the latest changes, and the status + // and its conditions are likely stale. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // PrometheusHost - Hostname for prometheus used for autoscaling + PrometheusHost string `json:"prometheusHostname,omitempty"` + + // PrometheusPort - Port for prometheus used for autoscaling + PrometheusPort int32 `json:"prometheusPort,omitempty"` + + // PrometheusTLS - Determines if TLS should be used for accessing prometheus + PrometheusTLS bool `json:"prometheusTLS,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// CloudKitty is the Schema for the cloudkitties API +type CloudKitty struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CloudKittySpec `json:"spec,omitempty"` + Status CloudKittyStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// CloudKittyList contains a list of CloudKitty +type CloudKittyList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CloudKitty `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CloudKitty{}, &CloudKittyList{}) +} + +// SetupDefaultsCloudKitty - initializes any CRD field defaults based on environment variables (the defaulting mechanism itself is implemented via webhooks) +func SetupDefaultsCloudKitty() { + // Acquire environmental defaults and initialize Telemetry defaults with them + cloudKittyDefaults := CloudKittyDefaults{ + APIContainerImageURL: util.GetEnvVar("RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT", CloudKittyAPIContainerImage), + ProcContainerImageURL: util.GetEnvVar("RELATED_IMAGE_CLOUDKITTY_PROCESSOR_IMAGE_URL_DEFAULT", CloudKittyProcContainerImage), + } + + SetupCloudKittyDefaults(cloudKittyDefaults) +} + +// IsReady - returns true if all subresources Ready condition is true +func (instance CloudKitty) IsReady() bool { + return instance.Generation == instance.Status.ObservedGeneration && + instance.Status.Conditions.IsTrue(CloudKittyAPIReadyCondition) && + instance.Status.Conditions.IsTrue(CloudKittyProcReadyCondition) +} + +// RbacConditionsSet - set the conditions for the rbac object +func (instance CloudKitty) RbacConditionsSet(c *condition.Condition) { + instance.Status.Conditions.Set(c) +} + +// RbacNamespace - return the namespace +func (instance CloudKitty) RbacNamespace() string { + return instance.Namespace +} + +// RbacResourceName - return the name to be used for rbac objects (serviceaccount, role, rolebinding) +func (instance CloudKitty) RbacResourceName() string { + return "cloudkitty-" + instance.Name +} diff --git a/api/v1beta1/cloudkitty_webhook.go b/api/v1beta1/cloudkitty_webhook.go new file mode 100644 index 00000000..dc3e0517 --- /dev/null +++ b/api/v1beta1/cloudkitty_webhook.go @@ -0,0 +1,227 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
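SetupDefaultsCloudKitty above resolves the container image defaults from the operator environment; in the operator Deployment this corresponds to env entries along these lines, where the values simply restate the fall-back URLs defined above and any override is environment specific:

    env:
      - name: RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT
        value: quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api:current
      - name: RELATED_IMAGE_CLOUDKITTY_PROCESSOR_IMAGE_URL_DEFAULT
        value: quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor:current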
+*/ + +package v1beta1 + +import ( + "fmt" + "slices" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// CloudKittyDefaults - +type CloudKittyDefaults struct { + APIContainerImageURL string + ProcContainerImageURL string +} + +var cloudKittyDefaults CloudKittyDefaults + +// log is for logging in this package. +var cloudKittyLog = logf.Log.WithName("cloudkitty-resource") + +// SetupCloudKittyDefaults - initialize CloudKitty spec defaults for use with either internal or external webhooks +func SetupCloudKittyDefaults(defaults CloudKittyDefaults) { + cloudKittyDefaults = defaults + cloudKittyLog.Info("CloudKitty defaults initialized", "defaults", defaults) +} + +// SetupWebhookWithManager - setups webhook with the adequate manager +func (r *CloudKitty) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! + +//+kubebuilder:webhook:path=/mutate-telemetry-openstack-org-v1beta1-cloudkitty,mutating=true,failurePolicy=fail,sideEffects=None,groups=telemetry.openstack.org,resources=cloudkitties,verbs=create;update,versions=v1beta1,name=mcloudkitty.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &CloudKitty{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *CloudKitty) Default() { + cloudKittyLog.Info("default", "name", r.Name) + + r.Spec.Default() +} + +// Default - set defaults for this CloudKitty spec +func (spec *CloudKittySpec) Default() { + if spec.CloudKittyAPI.ContainerImage == "" { + spec.CloudKittyAPI.ContainerImage = cloudKittyDefaults.APIContainerImageURL + } + if spec.CloudKittyProc.ContainerImage == "" { + spec.CloudKittyProc.ContainerImage = cloudKittyDefaults.ProcContainerImageURL + } + +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. +//+kubebuilder:webhook:path=/validate-telemetry-openstack-org-v1beta1-cloudkitty,mutating=false,failurePolicy=fail,sideEffects=None,groups=telemetry.openstack.org,resources=cloudkitties,verbs=create;update,versions=v1beta1,name=vcloudkitty.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &CloudKitty{} + +func (r *ObjectStorageSpec) Validate(basePath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + // NOTE: Having 0 schemas is allowed. 
LokiStack has a default for that + for _, s := range r.Schemas { + if s.EffectiveDate == "" { + allErrs = append( + allErrs, + field.Invalid( + basePath.Child("schemas").Child("effectiveDate"), "", "effectiveDate field should not be empty"), + ) + } + if s.Version == "" { + allErrs = append( + allErrs, + field.Invalid( + basePath.Child("schemas").Child("version"), "", "version field should not be empty"), + ) + } + } + + if r.Secret.Name == "" { + allErrs = append( + allErrs, + field.Invalid( + basePath.Child("secret").Child("name"), "", "name field should not be empty"), + ) + } + + if r.Secret.Type == "" { + allErrs = append( + allErrs, + field.Invalid( + basePath.Child("secret").Child("type"), "", "type field should not be empty"), + ) + } + validTypes := []string{"azure", "gcs", "s3", "swift", "alibabacloud"} + if !slices.Contains(validTypes, r.Secret.Type) { + allErrs = append( + allErrs, + field.Invalid( + basePath.Child("secret").Child("type"), r.Secret.Type, fmt.Sprintf("type field needs to be one of %s", validTypes)), + ) + } + + if r.TLS != nil && r.TLS.CASpec.CA == "" { + allErrs = append( + allErrs, + field.Invalid( + basePath.Child("tls").Child("caName"), "", "caName field should not be empty"), + ) + } + return allErrs +} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *CloudKitty) ValidateCreate() (admission.Warnings, error) { + cloudKittyLog.Info("validate create", "name", r.Name) + + allErrs := r.Spec.ValidateCreate(field.NewPath("spec"), r.Namespace) + + if len(allErrs) != 0 { + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "cloudkitties.telemetry.openstack.org", Kind: "CloudKitty"}, + r.Name, allErrs) + } + + return nil, nil +} + +// ValidateCreate validates the CloudKittySpec during the webhook invocation. +func (r *CloudKittySpec) ValidateCreate(basePath *field.Path, namespace string) field.ErrorList { + return r.CloudKittySpecBase.ValidateCreate(basePath, namespace) +} + +// ValidateCreate validates the CloudKittySpecCore during the webhook invocation. It is +// expected to be called by the validation webhook in the higher level telemetry webhook +func (r *CloudKittySpecCore) ValidateCreate(basePath *field.Path, namespace string) field.ErrorList { + return r.CloudKittySpecBase.ValidateCreate(basePath, namespace) +} + +// ValidateCreate validates the CloudKittySpecBase during the webhook invocation. +func (r *CloudKittySpecBase) ValidateCreate(basePath *field.Path, namespace string) field.ErrorList { + var allErrs field.ErrorList + + allErrs = append(allErrs, r.S3StorageConfig.Validate(basePath.Child("s3StorageConfig"))...) + + // TODO: Add other CK spec field validations as needed + + return allErrs +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *CloudKitty) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + + cloudKittyLog.Info("validate update", "name", r.Name) + oldCloudKitty, ok := old.(*CloudKitty) + if !ok || oldCloudKitty == nil { + return nil, apierrors.NewInternalError(fmt.Errorf("unable to convert existing object")) + } + + allErrs := r.Spec.ValidateUpdate(oldCloudKitty.Spec, field.NewPath("spec"), r.Namespace) + + if len(allErrs) != 0 { + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "cloudkitties.telemetry.openstack.org", Kind: "CloudKitty"}, + r.Name, allErrs) + } + + return nil, nil + +} + +// ValidateCreate validates the CloudKittySpec during the webhook invocation. 
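In CR terms, the object storage checks performed by Validate above boil down to constraints like the following illustrative fragment; the CA ConfigMap name is a placeholder:

    s3StorageConfig:
      secret:
        name: cloudkitty-loki-s3     # must not be empty
        type: s3                     # must be one of: azure, gcs, s3, swift, alibabacloud
      schemas:
        - version: v11               # version and effectiveDate must both be set for every entry
          effectiveDate: "2020-10-11"
      tls:
        caName: cloudkitty-s3-ca     # required whenever tls is specified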
+func (r *CloudKittySpec) ValidateUpdate(old CloudKittySpec, basePath *field.Path, namespace string) field.ErrorList { + return r.CloudKittySpecBase.ValidateUpdate(old.CloudKittySpecBase, basePath, namespace) +} + +// ValidateUpdate validates the CloudKittySpecCore during the webhook invocation. It is +// expected to be called by the validation webhook in the higher level telemetry webhook +func (r *CloudKittySpecCore) ValidateUpdate(old CloudKittySpecCore, basePath *field.Path, namespace string) field.ErrorList { + return r.CloudKittySpecBase.ValidateUpdate(old.CloudKittySpecBase, basePath, namespace) +} + +// ValidateCreate validates the CloudKittySpecBase during the webhook invocation. +func (r *CloudKittySpecBase) ValidateUpdate(old CloudKittySpecBase, basePath *field.Path, namespace string) field.ErrorList { + var allErrs field.ErrorList + + allErrs = append(allErrs, r.S3StorageConfig.Validate(basePath.Child("s3StorageConfig"))...) + + // TODO: Add other CK spec field validations as needed + + return allErrs +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *CloudKitty) ValidateDelete() (admission.Warnings, error) { + cloudKittyLog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. + return nil, nil +} diff --git a/api/v1beta1/cloudkittyapi_types.go b/api/v1beta1/cloudkittyapi_types.go new file mode 100644 index 00000000..3a71afb5 --- /dev/null +++ b/api/v1beta1/cloudkittyapi_types.go @@ -0,0 +1,155 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CloudKittyAPITemplateCore defines the input parameters for the CloudKitty API service +type CloudKittyAPITemplateCore struct { + // Common input parameters for the CloudKitty API service + CloudKittyServiceTemplate `json:",inline"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=1 + // +kubebuilder:validation:Minimum=0 + // Replicas - CloudKitty API Replicas + Replicas *int32 `json:"replicas"` + + // +kubebuilder:validation:Optional + // Override, provides the ability to override the generated manifest of several child resources. 
+ Override APIOverrideSpec `json:"override,omitempty"` + + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec + // TLS - Parameters related to the TLS + TLS tls.API `json:"tls,omitempty"` +} + +// CloudKittyAPITemplate defines the input parameters for the CloudKitty API service +type CloudKittyAPITemplate struct { + // +kubebuilder:validation:Optional + // ContainerImage - CloudKitty Container Image URL (will be set to environmental default if empty) + ContainerImage string `json:"containerImage"` + + CloudKittyAPITemplateCore `json:",inline"` +} + +// CloudKittyAPISpec defines the desired state of CloudKittyAPI +type CloudKittyAPISpec struct { + // Common input parameters for all CloudKitty services + CloudKittyTemplate `json:",inline"` + + // Input parameters for the CloudKitty API service + CloudKittyAPITemplate `json:",inline"` + + // +kubebuilder:validation:Optional + // DatabaseHostname - CloudKitty Database Hostname + DatabaseHostname string `json:"databaseHostname"` + + // +kubebuilder:validation:Optional + // Secret containing RabbitMq transport URL + TransportURLSecret string `json:"transportURLSecret"` + + // +kubebuilder:validation:Optional + // ServiceAccount - service account name used internally to provide CloudKitty services the default SA name + ServiceAccount string `json:"serviceAccount"` +} + +// CloudKittyAPIStatus defines the observed state of CloudKittyAPI +type CloudKittyAPIStatus struct { + // Map of hashes to track e.g. job status + Hash map[string]string `json:"hash,omitempty"` + + // API endpoints + APIEndpoints map[string]map[string]string `json:"apiEndpoints,omitempty"` + + // Conditions + Conditions condition.Conditions `json:"conditions,omitempty" optional:"true"` + + // ReadyCount of CloudKitty API instances + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=0 + ReadyCount int32 `json:"readyCount"` + + // ServiceIDs + ServiceIDs map[string]string `json:"serviceIDs,omitempty"` + + // NetworkAttachments status of the deployment pods + NetworkAttachments map[string][]string `json:"networkAttachments,omitempty"` + + // ObservedGeneration - the most recent generation observed for this service. + // If the observed generation is different than the spec generation, then the + // controller has not started processing the latest changes, and the status + // and its conditions are likely stale. 
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // LastAppliedTopology - the last applied Topology + LastAppliedTopology *topologyv1.TopoRef `json:"lastAppliedTopology,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// CloudKittyAPI is the Schema for the cloudkittyapis API +type CloudKittyAPI struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CloudKittyAPISpec `json:"spec,omitempty"` + Status CloudKittyAPIStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// CloudKittyAPIList contains a list of CloudKittyAPI +type CloudKittyAPIList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CloudKittyAPI `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CloudKittyAPI{}, &CloudKittyAPIList{}) +} + +// IsReady - returns true if service is ready to serve requests +func (instance CloudKittyAPI) IsReady() bool { + return instance.Generation == instance.Status.ObservedGeneration && + instance.Status.ReadyCount == *instance.Spec.Replicas && + (instance.Status.Conditions.IsTrue(condition.DeploymentReadyCondition) || + (instance.Status.Conditions.IsFalse(condition.DeploymentReadyCondition) && *instance.Spec.Replicas == 0)) +} + +// GetSpecTopologyRef - Returns the LastAppliedTopology Set in the Status +func (instance *CloudKittyAPI) GetSpecTopologyRef() *topologyv1.TopoRef { + return instance.Spec.TopologyRef +} + +// GetLastAppliedTopology - Returns the LastAppliedTopology Set in the Status +func (instance *CloudKittyAPI) GetLastAppliedTopology() *topologyv1.TopoRef { + return instance.Status.LastAppliedTopology +} + +// SetLastAppliedTopology - Sets the LastAppliedTopology value in the Status +func (instance *CloudKittyAPI) SetLastAppliedTopology(topologyRef *topologyv1.TopoRef) { + instance.Status.LastAppliedTopology = topologyRef +} diff --git a/api/v1beta1/cloudkittyproc_types.go b/api/v1beta1/cloudkittyproc_types.go new file mode 100644 index 00000000..83de10d1 --- /dev/null +++ b/api/v1beta1/cloudkittyproc_types.go @@ -0,0 +1,148 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CloudKittyProcTemplateCore defines the input parameters for the CloudKitty Processor service +type CloudKittyProcTemplateCore struct { + // Common input parameters for the CloudKitty Processor service + CloudKittyServiceTemplate `json:",inline"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=1 + // +kubebuilder:validation:Minimum=0 + // Replicas - CloudKitty API Replicas + Replicas *int32 `json:"replicas"` + + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec + // TLS - Parameters related to the TLS + TLS tls.SimpleService `json:"tls,omitempty"` +} + +// CloudKittyProcTemplate defines the input parameters for the CloudKitty Processor service +type CloudKittyProcTemplate struct { + // +kubebuilder:validation:Optional + // ContainerImage - CloudKitty Container Image URL (will be set to environmental default if empty) + ContainerImage string `json:"containerImage"` + + CloudKittyProcTemplateCore `json:",inline"` +} + +// CloudKittyProcSpec defines the desired state of CloudKitty Processor +type CloudKittyProcSpec struct { + // Common input parameters for all CloudKitty services + CloudKittyTemplate `json:",inline"` + + // Input parameters for the CloudKitty Processor service + CloudKittyProcTemplate `json:",inline"` + + // +kubebuilder:validation:Optional + // DatabaseHostname - CloudKitty Database Hostname + DatabaseHostname string `json:"databaseHostname"` + + // +kubebuilder:validation:Optional + // Secret containing RabbitMq transport URL + TransportURLSecret string `json:"transportURLSecret"` + + // +kubebuilder:validation:Optional + // ServiceAccount - service account name used internally to provide CloudKitty services the default SA name + ServiceAccount string `json:"serviceAccount"` +} + +// CloudKittyProcStatus defines the observed state of CloudKitty Processor +type CloudKittyProcStatus struct { + // Map of hashes to track e.g. job status + Hash map[string]string `json:"hash,omitempty"` + + // Conditions + Conditions condition.Conditions `json:"conditions,omitempty" optional:"true"` + + // ReadyCount of CloudKitty Processor instances + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:default=0 + ReadyCount int32 `json:"readyCount"` + + // NetworkAttachments status of the deployment pods + NetworkAttachments map[string][]string `json:"networkAttachments,omitempty"` + + // ObservedGeneration - the most recent generation observed for this service. + // If the observed generation is different than the spec generation, then the + // controller has not started processing the latest changes, and the status + // and its conditions are likely stale. 
+ ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // LastAppliedTopology - the last applied Topology + LastAppliedTopology *topologyv1.TopoRef `json:"lastAppliedTopology,omitempty"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status +//+kubebuilder:printcolumn:name="NetworkAttachments",type="string",JSONPath=".status.networkAttachments",description="NetworkAttachments" +//+kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[0].status",description="Status" +//+kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[0].message",description="Message" + +// CloudKittyProc is the Schema for the cloudkittprocs API +type CloudKittyProc struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec CloudKittyProcSpec `json:"spec,omitempty"` + Status CloudKittyProcStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// CloudKittyProcList contains a list of CloudKittyProc +type CloudKittyProcList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []CloudKittyProc `json:"items"` +} + +func init() { + SchemeBuilder.Register(&CloudKittyProc{}, &CloudKittyProcList{}) +} + +// IsReady - returns true if service is ready to serve requests +func (instance CloudKittyProc) IsReady() bool { + return instance.Generation == instance.Status.ObservedGeneration && + instance.Status.ReadyCount == *instance.Spec.Replicas && + (instance.Status.Conditions.IsTrue(condition.DeploymentReadyCondition) || + (instance.Status.Conditions.IsFalse(condition.DeploymentReadyCondition) && *instance.Spec.Replicas == 0)) +} + +// GetSpecTopologyRef - Returns the LastAppliedTopology Set in the Status +func (instance *CloudKittyProc) GetSpecTopologyRef() *topologyv1.TopoRef { + return instance.Spec.TopologyRef +} + +// GetLastAppliedTopology - Returns the LastAppliedTopology Set in the Status +func (instance *CloudKittyProc) GetLastAppliedTopology() *topologyv1.TopoRef { + return instance.Status.LastAppliedTopology +} + +// SetLastAppliedTopology - Sets the LastAppliedTopology value in the Status +func (instance *CloudKittyProc) SetLastAppliedTopology(topologyRef *topologyv1.TopoRef) { + instance.Status.LastAppliedTopology = topologyRef +} diff --git a/api/v1beta1/conditions.go b/api/v1beta1/conditions.go index f9db6290..d2ccd731 100644 --- a/api/v1beta1/conditions.go +++ b/api/v1beta1/conditions.go @@ -45,6 +45,24 @@ const ( // LoggingReadyCondition Status=True condition which indicates if the Logging is configured and operational LoggingReadyCondition condition.Type = "LoggingReady" + // CloudKittyReadyCondition Status=True condition which indicates if the CloudKitty is configured and operational + CloudKittyReadyCondition condition.Type = "CloudKittyReady" + + // CloudKittyAPIReadyCondition Status=True condition which indicates if the CloudKitty API is configured and operational + CloudKittyAPIReadyCondition condition.Type = "CloudKittyAPIReady" + + // CloudKittyProcReadyCondition Status=True condition which indicates if the CloudKitty Processor is configured and operational + CloudKittyProcReadyCondition condition.Type = "CloudKittyProcReady" + + // CloudKittyStorageInitReadyCondition Status=True condition which indicates if the CloudKitty Storage Init process has ran + CloudKittyStorageInitReadyCondition condition.Type = "CloudKittyStorageInitReady" + + // CloudKittyClientCertReadyCondition Status=True condition which indicates if 
the CloudKitty client certificate is ready for use
+	CloudKittyClientCertReadyCondition condition.Type = "CloudKittyClientCertReady"
+
+	// CloudKittyLokiStackReadyCondition Status=True condition which indicates if the CloudKitty LokiStack is ready
+	CloudKittyLokiStackReadyCondition condition.Type = "CloudKittyLokiStackReady"
+
 	// LoggingCLONamespaceReadyCondition Status=True condition which indicates if the cluster-logging-operator namespace is created
 	LoggingCLONamespaceReadyCondition condition.Type = "LoggingCLONamespaceReady"

@@ -202,6 +220,95 @@ const (
 	// LoggingCLONamespaceFailedMessage
 	LoggingCLONamespaceFailedMessage = "CLO Namespace %s does not exist"

+	//
+	// CloudKittyReady condition messages
+	//
+	// CloudKittyReadyInitMessage
+	CloudKittyReadyInitMessage = "CloudKitty not started"
+
+	// CloudKittyReadyMessage
+	CloudKittyReadyMessage = "CloudKitty completed"
+
+	// CloudKittyReadyErrorMessage
+	CloudKittyReadyErrorMessage = "CloudKitty error occurred %s"
+
+	//
+	// CloudKittyStorageInit condition messages
+	//
+	// CloudKittyStorageInitReadyInitMessage
+	CloudKittyStorageInitReadyInitMessage = "CloudKittyStorageInit not started"
+
+	// CloudKittyStorageInitReadyMessage
+	CloudKittyStorageInitReadyMessage = "CloudKittyStorageInit completed"
+
+	// CloudKittyStorageInitReadyRunningMessage
+	CloudKittyStorageInitReadyRunningMessage = "CloudKittyStorageInit job still running"
+
+	// CloudKittyStorageInitReadyErrorMessage
+	CloudKittyStorageInitReadyErrorMessage = "CloudKittyStorageInit job error occurred %s"
+
+	//
+	// CloudKittyAPIReady condition messages
+	//
+	// CloudKittyAPIReadyInitMessage
+	CloudKittyAPIReadyInitMessage = "CloudKittyAPI not started"
+
+	// CloudKittyAPIReadyMessage
+	CloudKittyAPIReadyMessage = "CloudKittyAPI completed"
+
+	// CloudKittyAPIReadyErrorMessage
+	CloudKittyAPIReadyErrorMessage = "CloudKittyAPI error occurred %s"
+
+	// CloudKittyAPIReadyRunningMessage
+	CloudKittyAPIReadyRunningMessage = "CloudKittyAPI in progress"
+
+	//
+	// CloudKittyProcReady condition messages
+	//
+	// CloudKittyProcReadyInitMessage
+	CloudKittyProcReadyInitMessage = "CloudKittyProc not started"
+
+	// CloudKittyProcReadyMessage
+	CloudKittyProcReadyMessage = "CloudKittyProc completed"
+
+	// CloudKittyProcReadyErrorMessage
+	CloudKittyProcReadyErrorMessage = "CloudKittyProc error occurred %s"
+
+	// CloudKittyProcReadyRunningMessage
+	CloudKittyProcReadyRunningMessage = "CloudKittyProc in progress"
+
+	//
+	// CloudKittyClientCertReady condition messages
+	//
+	// CloudKittyClientCertReadyInitMessage
+	CloudKittyClientCertReadyInitMessage = "CloudKittyClientCert not created"
+
+	// CloudKittyClientCertReadyMessage
+	CloudKittyClientCertReadyMessage = "CloudKittyClientCert ready for use"
+
+	// CloudKittyClientCertReadyErrorMessage
+	CloudKittyClientCertReadyErrorMessage = "CloudKittyClientCert error occurred %s"
+
+	// CloudKittyClientCertReadyRunningMessage
+	CloudKittyClientCertReadyRunningMessage = "CloudKittyClientCert in progress"
+
+	//
+	// CloudKittyLokiStackReady condition messages
+	//
+	// CloudKittyLokiStackReadyInitMessage
+	CloudKittyLokiStackReadyInitMessage = "CloudKittyLokiStack not created"
+
+	// CloudKittyLokiStackReadyMessage
+	CloudKittyLokiStackReadyMessage = "CloudKittyLokiStack ready for use"
+
+	// CloudKittyLokiStackReadyErrorMessage
+	CloudKittyLokiStackReadyErrorMessage = "CloudKittyLokiStack error occurred %s"
+
+	// CloudKittyLokiStackReadyRunningMessage
+	CloudKittyLokiStackReadyRunningMessage = "CloudKittyLokiStack in progress"
+
+	// CloudKittyLokiStackUnableToOwnMessage
+	CloudKittyLokiStackUnableToOwnMessage = "Error occurred when trying to own %s"
+
 	DashboardsNotEnabledMessage             = "Dashboarding was not enabled, so no actions required"
 	DashboardPrometheusRuleReadyInitMessage = "Dashboard PrometheusRule not started"
diff --git a/api/v1beta1/telemetry_types.go b/api/v1beta1/telemetry_types.go
index 7fb6a930..9e8e895a 100644
--- a/api/v1beta1/telemetry_types.go
+++ b/api/v1beta1/telemetry_types.go
@@ -17,13 +17,21 @@ limitations under the License.
 package v1beta1

 import (
+	"github.com/openstack-k8s-operators/lib-common/modules/common/service"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

+	topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1"
 	condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition"
 	"github.com/openstack-k8s-operators/lib-common/modules/common/util"
-	topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1"
 )

+// APIOverrideSpec to override the generated manifest of several child resources.
+type APIOverrideSpec struct {
+	// Override configuration for the Service created to serve traffic to the cluster.
+	// The key must be the endpoint type (public, internal)
+	Service map[service.Endpoint]service.RoutedOverrideSpec `json:"service,omitempty"`
+}
+
 // PasswordsSelector to identify the Service password from the Secret
 type PasswordsSelector struct {
 	// CeilometerService - Selector to get the ceilometer service password from the Secret
@@ -35,6 +43,11 @@ type PasswordsSelector struct {
 	// +kubebuilder:validation:Optional
 	// +kubebuilder:default:=AodhPassword
 	AodhService string `json:"aodhService"`
+
+	// CloudKittyService - Selector to get the CloudKitty service password from the Secret
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default:=CloudKittyPassword
+	CloudKittyService string `json:"cloudKittyService"`
 }

 // TelemetrySpec defines the desired state of Telemetry
@@ -48,6 +61,10 @@ type TelemetrySpec struct {
 	// +kubebuilder:validation:Optional
 	// Ceilometer - Parameters related to the ceilometer service
 	Ceilometer CeilometerSection `json:"ceilometer,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	// CloudKitty - Parameters related to the cloudkitty service
+	CloudKitty CloudKittySection `json:"cloudkitty,omitempty"`
 }

 // TelemetrySpecCore defines the desired state of Telemetry.
This version has no image parameters and is used by OpenStackControlplane
@@ -61,6 +78,10 @@ type TelemetrySpecCore struct {
 	// +kubebuilder:validation:Optional
 	// Ceilometer - Parameters related to the ceilometer service
 	Ceilometer CeilometerSectionCore `json:"ceilometer,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	// CloudKitty - Parameters related to the cloudkitty service
+	CloudKitty CloudKittySectionCore `json:"cloudkitty,omitempty"`
 }

 // TelemetrySpecBase -
@@ -167,6 +188,34 @@ type LoggingSection struct {
 	LoggingSpec `json:",inline"`
 }

+// CloudKittySection defines the desired state of the cloudkitty service
+type CloudKittySection struct {
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=false
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:booleanSwitch"}
+	// Enabled - Whether OpenStack CloudKitty service should be deployed and managed
+	Enabled *bool `json:"enabled"`
+
+	// +kubebuilder:validation:Optional
+	//+operator-sdk:csv:customresourcedefinitions:type=spec
+	// Template - Overrides to use when creating the OpenStack CloudKitty service
+	CloudKittySpec `json:",inline"`
+}
+
+// CloudKittySectionCore defines the desired state of the cloudkitty service
+type CloudKittySectionCore struct {
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=false
+	// +operator-sdk:csv:customresourcedefinitions:type=spec,xDescriptors={"urn:alm:descriptor:com.tectonic.ui:booleanSwitch"}
+	// Enabled - Whether OpenStack CloudKitty service should be deployed and managed
+	Enabled *bool `json:"enabled"`
+
+	// +kubebuilder:validation:Optional
+	//+operator-sdk:csv:customresourcedefinitions:type=spec
+	// Template - Overrides to use when creating the OpenStack CloudKitty service
+	CloudKittySpecCore `json:",inline"`
+}
+
 // TelemetryStatus defines the observed state of Telemetry
 type TelemetryStatus struct {
 	// Map of hashes to track e.g.
job status @@ -234,6 +283,10 @@ func SetupDefaultsTelemetry() { AodhEvaluatorContainerImageURL: util.GetEnvVar("RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT", AodhEvaluatorContainerImage), AodhNotifierContainerImageURL: util.GetEnvVar("RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT", AodhNotifierContainerImage), AodhListenerContainerImageURL: util.GetEnvVar("RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT", AodhListenerContainerImage), + + // CloudKitty + CloudKittyAPIContainerImageURL: util.GetEnvVar("RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT", CloudKittyAPIContainerImage), + CloudKittyProcContainerImageURL: util.GetEnvVar("RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT", CloudKittyProcContainerImage), } SetupTelemetryDefaults(telemetryDefaults) diff --git a/api/v1beta1/telemetry_webhook.go b/api/v1beta1/telemetry_webhook.go index 9eb28d85..5a9c3e20 100644 --- a/api/v1beta1/telemetry_webhook.go +++ b/api/v1beta1/telemetry_webhook.go @@ -46,6 +46,8 @@ type TelemetryDefaults struct { AodhEvaluatorContainerImageURL string AodhNotifierContainerImageURL string AodhListenerContainerImageURL string + CloudKittyAPIContainerImageURL string + CloudKittyProcContainerImageURL string } var telemetryDefaults TelemetryDefaults @@ -115,6 +117,12 @@ func (spec *TelemetrySpec) Default() { if spec.Autoscaling.AutoscalingSpec.Aodh.ListenerImage == "" { spec.Autoscaling.AutoscalingSpec.Aodh.ListenerImage = telemetryDefaults.AodhListenerContainerImageURL } + if spec.CloudKitty.CloudKittyAPI.ContainerImage == "" { + spec.CloudKitty.CloudKittyAPI.ContainerImage = telemetryDefaults.CloudKittyAPIContainerImageURL + } + if spec.CloudKitty.CloudKittyProc.ContainerImage == "" { + spec.CloudKitty.CloudKittyProc.ContainerImage = telemetryDefaults.CloudKittyProcContainerImageURL + } } // Default - set defaults for this Telemetry spec core @@ -148,12 +156,18 @@ func (r TelemetrySpec) ValidateCreate(basePath *field.Path, namespace string) fi var allErrs field.ErrorList allErrs = append(allErrs, r.ValidateTelemetryTopology(basePath, namespace)...) + if r.CloudKitty.Enabled != nil && *r.CloudKitty.Enabled { + allErrs = append(allErrs, r.CloudKitty.CloudKittySpec.ValidateCreate(basePath.Child("cloudkitty"), namespace)...) + } return allErrs } func (r TelemetrySpecCore) ValidateCreate(basePath *field.Path, namespace string) field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, r.ValidateTelemetryTopology(basePath, namespace)...) + if r.CloudKitty.Enabled != nil && *r.CloudKitty.Enabled { + allErrs = append(allErrs, r.CloudKitty.CloudKittySpecCore.ValidateCreate(basePath.Child("cloudkitty"), namespace)...) + } return allErrs } @@ -178,12 +192,22 @@ func (r *Telemetry) ValidateUpdate(old runtime.Object) (admission.Warnings, erro } func (r TelemetrySpec) ValidateUpdate(old TelemetrySpec, basePath *field.Path, namespace string) field.ErrorList { - return r.ValidateCreate(basePath, namespace) + var allErrs field.ErrorList + + allErrs = append(allErrs, r.ValidateTelemetryTopology(basePath, namespace)...) + + if r.CloudKitty.Enabled != nil && *r.CloudKitty.Enabled { + allErrs = append(allErrs, r.CloudKitty.CloudKittySpec.ValidateUpdate(old.CloudKitty.CloudKittySpec, basePath.Child("cloudkitty"), namespace)...) + } + return allErrs } func (r TelemetrySpecCore) ValidateUpdate(old TelemetrySpecCore, basePath *field.Path, namespace string) field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, r.ValidateTelemetryTopology(basePath, namespace)...) 
+ if r.CloudKitty.Enabled != nil && *r.CloudKitty.Enabled { + allErrs = append(allErrs, r.CloudKitty.CloudKittySpecCore.ValidateUpdate(old.CloudKitty.CloudKittySpecCore, basePath.Child("cloudkitty"), namespace)...) + } return allErrs } @@ -217,6 +241,8 @@ func (spec *TelemetrySpecCore) ValidateTelemetryTopology(basePath *field.Path, n allErrs = append(allErrs, spec.Ceilometer.ValidateTopology(ceilPath, namespace)...) + // TODO: investigate whether a topology validation is needed for CloudKitty or MetricStorage + return allErrs } // ValidateTelemetryTopology - Returns an ErrorList if the Topology is referenced @@ -241,5 +267,7 @@ func (spec *TelemetrySpec) ValidateTelemetryTopology(basePath *field.Path, names allErrs = append(allErrs, spec.Ceilometer.ValidateTopology(ceilPath, namespace)...) + // TODO: investigate whether a topology validation is needed for CloudKitty or MetricStorage + return allErrs } diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index da8d5649..3a99ea0f 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -330,28 +330,586 @@ func (in *AutoscalingStatus) DeepCopy() *AutoscalingStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CASpec) DeepCopyInto(out *CASpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CASpec. +func (in *CASpec) DeepCopy() *CASpec { + if in == nil { + return nil + } + out := new(CASpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Ceilometer) DeepCopyInto(out *Ceilometer) { *out = *in out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - in.KSMStatus.DeepCopyInto(&out.KSMStatus) + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + in.KSMStatus.DeepCopyInto(&out.KSMStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ceilometer. +func (in *Ceilometer) DeepCopy() *Ceilometer { + if in == nil { + return nil + } + out := new(Ceilometer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ceilometer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CeilometerDefaults) DeepCopyInto(out *CeilometerDefaults) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerDefaults. +func (in *CeilometerDefaults) DeepCopy() *CeilometerDefaults { + if in == nil { + return nil + } + out := new(CeilometerDefaults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CeilometerList) DeepCopyInto(out *CeilometerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ceilometer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerList. +func (in *CeilometerList) DeepCopy() *CeilometerList { + if in == nil { + return nil + } + out := new(CeilometerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CeilometerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CeilometerSection) DeepCopyInto(out *CeilometerSection) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + in.CeilometerSpec.DeepCopyInto(&out.CeilometerSpec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerSection. +func (in *CeilometerSection) DeepCopy() *CeilometerSection { + if in == nil { + return nil + } + out := new(CeilometerSection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CeilometerSectionCore) DeepCopyInto(out *CeilometerSectionCore) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + in.CeilometerSpecCore.DeepCopyInto(&out.CeilometerSpecCore) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerSectionCore. +func (in *CeilometerSectionCore) DeepCopy() *CeilometerSectionCore { + if in == nil { + return nil + } + out := new(CeilometerSectionCore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CeilometerSpec) DeepCopyInto(out *CeilometerSpec) { + *out = *in + in.CeilometerSpecCore.DeepCopyInto(&out.CeilometerSpecCore) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerSpec. +func (in *CeilometerSpec) DeepCopy() *CeilometerSpec { + if in == nil { + return nil + } + out := new(CeilometerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CeilometerSpecCore) DeepCopyInto(out *CeilometerSpecCore) { + *out = *in + out.PasswordSelectors = in.PasswordSelectors + if in.DefaultConfigOverwrite != nil { + in, out := &in.DefaultConfigOverwrite, &out.DefaultConfigOverwrite + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NetworkAttachmentDefinitions != nil { + in, out := &in.NetworkAttachmentDefinitions, &out.NetworkAttachmentDefinitions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KSMEnabled != nil { + in, out := &in.KSMEnabled, &out.KSMEnabled + *out = new(bool) + **out = **in + } + if in.MysqldExporterEnabled != nil { + in, out := &in.MysqldExporterEnabled, &out.MysqldExporterEnabled + *out = new(bool) + **out = **in + } + in.TLS.DeepCopyInto(&out.TLS) + in.KSMTLS.DeepCopyInto(&out.KSMTLS) + in.MysqldExporterTLS.DeepCopyInto(&out.MysqldExporterTLS) + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(map[string]string) + if **in != nil { + in, out := *in, *out + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + if in.TopologyRef != nil { + in, out := &in.TopologyRef, &out.TopologyRef + *out = new(topologyv1beta1.TopoRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerSpecCore. +func (in *CeilometerSpecCore) DeepCopy() *CeilometerSpecCore { + if in == nil { + return nil + } + out := new(CeilometerSpecCore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CeilometerStatus) DeepCopyInto(out *CeilometerStatus) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.MysqldExporterHash != nil { + in, out := &in.MysqldExporterHash, &out.MysqldExporterHash + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MysqldExporterExportedGaleras != nil { + in, out := &in.MysqldExporterExportedGaleras, &out.MysqldExporterExportedGaleras + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KSMHash != nil { + in, out := &in.KSMHash, &out.KSMHash + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LastAppliedTopology != nil { + in, out := &in.LastAppliedTopology, &out.LastAppliedTopology + *out = new(topologyv1beta1.TopoRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerStatus. +func (in *CeilometerStatus) DeepCopy() *CeilometerStatus { + if in == nil { + return nil + } + out := new(CeilometerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudKitty) DeepCopyInto(out *CloudKitty) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKitty. +func (in *CloudKitty) DeepCopy() *CloudKitty { + if in == nil { + return nil + } + out := new(CloudKitty) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudKitty) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyAPI) DeepCopyInto(out *CloudKittyAPI) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyAPI. +func (in *CloudKittyAPI) DeepCopy() *CloudKittyAPI { + if in == nil { + return nil + } + out := new(CloudKittyAPI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudKittyAPI) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyAPIList) DeepCopyInto(out *CloudKittyAPIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudKittyAPI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyAPIList. +func (in *CloudKittyAPIList) DeepCopy() *CloudKittyAPIList { + if in == nil { + return nil + } + out := new(CloudKittyAPIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudKittyAPIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyAPISpec) DeepCopyInto(out *CloudKittyAPISpec) { + *out = *in + out.CloudKittyTemplate = in.CloudKittyTemplate + in.CloudKittyAPITemplate.DeepCopyInto(&out.CloudKittyAPITemplate) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyAPISpec. +func (in *CloudKittyAPISpec) DeepCopy() *CloudKittyAPISpec { + if in == nil { + return nil + } + out := new(CloudKittyAPISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudKittyAPIStatus) DeepCopyInto(out *CloudKittyAPIStatus) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.APIEndpoints != nil { + in, out := &in.APIEndpoints, &out.APIEndpoints + *out = make(map[string]map[string]string, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ServiceIDs != nil { + in, out := &in.ServiceIDs, &out.ServiceIDs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.LastAppliedTopology != nil { + in, out := &in.LastAppliedTopology, &out.LastAppliedTopology + *out = new(topologyv1beta1.TopoRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyAPIStatus. +func (in *CloudKittyAPIStatus) DeepCopy() *CloudKittyAPIStatus { + if in == nil { + return nil + } + out := new(CloudKittyAPIStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyAPITemplate) DeepCopyInto(out *CloudKittyAPITemplate) { + *out = *in + in.CloudKittyAPITemplateCore.DeepCopyInto(&out.CloudKittyAPITemplateCore) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyAPITemplate. +func (in *CloudKittyAPITemplate) DeepCopy() *CloudKittyAPITemplate { + if in == nil { + return nil + } + out := new(CloudKittyAPITemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyAPITemplateCore) DeepCopyInto(out *CloudKittyAPITemplateCore) { + *out = *in + in.CloudKittyServiceTemplate.DeepCopyInto(&out.CloudKittyServiceTemplate) + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Override.DeepCopyInto(&out.Override) + in.TLS.DeepCopyInto(&out.TLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyAPITemplateCore. +func (in *CloudKittyAPITemplateCore) DeepCopy() *CloudKittyAPITemplateCore { + if in == nil { + return nil + } + out := new(CloudKittyAPITemplateCore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyDefaults) DeepCopyInto(out *CloudKittyDefaults) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyDefaults. 
+func (in *CloudKittyDefaults) DeepCopy() *CloudKittyDefaults { + if in == nil { + return nil + } + out := new(CloudKittyDefaults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyList) DeepCopyInto(out *CloudKittyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudKitty, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyList. +func (in *CloudKittyList) DeepCopy() *CloudKittyList { + if in == nil { + return nil + } + out := new(CloudKittyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudKittyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyProc) DeepCopyInto(out *CloudKittyProc) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyProc. +func (in *CloudKittyProc) DeepCopy() *CloudKittyProc { + if in == nil { + return nil + } + out := new(CloudKittyProc) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudKittyProc) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyProcList) DeepCopyInto(out *CloudKittyProcList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudKittyProc, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ceilometer. -func (in *Ceilometer) DeepCopy() *Ceilometer { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyProcList. +func (in *CloudKittyProcList) DeepCopy() *CloudKittyProcList { if in == nil { return nil } - out := new(Ceilometer) + out := new(CloudKittyProcList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Ceilometer) DeepCopyObject() runtime.Object { +func (in *CloudKittyProcList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -359,139 +917,216 @@ func (in *Ceilometer) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CeilometerDefaults) DeepCopyInto(out *CeilometerDefaults) { +func (in *CloudKittyProcSpec) DeepCopyInto(out *CloudKittyProcSpec) { *out = *in + out.CloudKittyTemplate = in.CloudKittyTemplate + in.CloudKittyProcTemplate.DeepCopyInto(&out.CloudKittyProcTemplate) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerDefaults. -func (in *CeilometerDefaults) DeepCopy() *CeilometerDefaults { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyProcSpec. +func (in *CloudKittyProcSpec) DeepCopy() *CloudKittyProcSpec { if in == nil { return nil } - out := new(CeilometerDefaults) + out := new(CloudKittyProcSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CeilometerList) DeepCopyInto(out *CeilometerList) { +func (in *CloudKittyProcStatus) DeepCopyInto(out *CloudKittyProcStatus) { *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Ceilometer, len(*in)) + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.LastAppliedTopology != nil { + in, out := &in.LastAppliedTopology, &out.LastAppliedTopology + *out = new(topologyv1beta1.TopoRef) + **out = **in + } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerList. -func (in *CeilometerList) DeepCopy() *CeilometerList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyProcStatus. +func (in *CloudKittyProcStatus) DeepCopy() *CloudKittyProcStatus { if in == nil { return nil } - out := new(CeilometerList) + out := new(CloudKittyProcStatus) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CeilometerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyProcTemplate) DeepCopyInto(out *CloudKittyProcTemplate) { + *out = *in + in.CloudKittyProcTemplateCore.DeepCopyInto(&out.CloudKittyProcTemplateCore) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyProcTemplate. +func (in *CloudKittyProcTemplate) DeepCopy() *CloudKittyProcTemplate { + if in == nil { + return nil } - return nil + out := new(CloudKittyProcTemplate) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CeilometerSection) DeepCopyInto(out *CeilometerSection) { +func (in *CloudKittyProcTemplateCore) DeepCopyInto(out *CloudKittyProcTemplateCore) { *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) + in.CloudKittyServiceTemplate.DeepCopyInto(&out.CloudKittyServiceTemplate) + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) **out = **in } - in.CeilometerSpec.DeepCopyInto(&out.CeilometerSpec) + in.TLS.DeepCopyInto(&out.TLS) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerSection. -func (in *CeilometerSection) DeepCopy() *CeilometerSection { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyProcTemplateCore. +func (in *CloudKittyProcTemplateCore) DeepCopy() *CloudKittyProcTemplateCore { if in == nil { return nil } - out := new(CeilometerSection) + out := new(CloudKittyProcTemplateCore) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CeilometerSectionCore) DeepCopyInto(out *CeilometerSectionCore) { +func (in *CloudKittySection) DeepCopyInto(out *CloudKittySection) { *out = *in if in.Enabled != nil { in, out := &in.Enabled, &out.Enabled *out = new(bool) **out = **in } - in.CeilometerSpecCore.DeepCopyInto(&out.CeilometerSpecCore) + in.CloudKittySpec.DeepCopyInto(&out.CloudKittySpec) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerSectionCore. -func (in *CeilometerSectionCore) DeepCopy() *CeilometerSectionCore { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittySection. +func (in *CloudKittySection) DeepCopy() *CloudKittySection { if in == nil { return nil } - out := new(CeilometerSectionCore) + out := new(CloudKittySection) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CeilometerSpec) DeepCopyInto(out *CeilometerSpec) { +func (in *CloudKittySectionCore) DeepCopyInto(out *CloudKittySectionCore) { *out = *in - in.CeilometerSpecCore.DeepCopyInto(&out.CeilometerSpecCore) + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = new(bool) + **out = **in + } + in.CloudKittySpecCore.DeepCopyInto(&out.CloudKittySpecCore) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerSpec. -func (in *CeilometerSpec) DeepCopy() *CeilometerSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittySectionCore. +func (in *CloudKittySectionCore) DeepCopy() *CloudKittySectionCore { if in == nil { return nil } - out := new(CeilometerSpec) + out := new(CloudKittySectionCore) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CeilometerSpecCore) DeepCopyInto(out *CeilometerSpecCore) { +func (in *CloudKittyServiceTemplate) DeepCopyInto(out *CloudKittyServiceTemplate) { *out = *in - out.PasswordSelectors = in.PasswordSelectors - if in.DefaultConfigOverwrite != nil { - in, out := &in.DefaultConfigOverwrite, &out.DefaultConfigOverwrite - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(map[string]string) + if **in != nil { + in, out := *in, *out + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } } } - if in.NetworkAttachmentDefinitions != nil { - in, out := &in.NetworkAttachmentDefinitions, &out.NetworkAttachmentDefinitions + if in.CustomServiceConfigSecrets != nil { + in, out := &in.CustomServiceConfigSecrets, &out.CustomServiceConfigSecrets *out = make([]string, len(*in)) copy(*out, *in) } - if in.KSMEnabled != nil { - in, out := &in.KSMEnabled, &out.KSMEnabled - *out = new(bool) - **out = **in + in.Resources.DeepCopyInto(&out.Resources) + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = make([]string, len(*in)) + copy(*out, *in) } - if in.MysqldExporterEnabled != nil { - in, out := &in.MysqldExporterEnabled, &out.MysqldExporterEnabled - *out = new(bool) + if in.TopologyRef != nil { + in, out := &in.TopologyRef, &out.TopologyRef + *out = new(topologyv1beta1.TopoRef) **out = **in } - in.TLS.DeepCopyInto(&out.TLS) - in.KSMTLS.DeepCopyInto(&out.KSMTLS) - in.MysqldExporterTLS.DeepCopyInto(&out.MysqldExporterTLS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyServiceTemplate. +func (in *CloudKittyServiceTemplate) DeepCopy() *CloudKittyServiceTemplate { + if in == nil { + return nil + } + out := new(CloudKittyServiceTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittySpec) DeepCopyInto(out *CloudKittySpec) { + *out = *in + in.CloudKittySpecBase.DeepCopyInto(&out.CloudKittySpecBase) + in.CloudKittyAPI.DeepCopyInto(&out.CloudKittyAPI) + in.CloudKittyProc.DeepCopyInto(&out.CloudKittyProc) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittySpec. +func (in *CloudKittySpec) DeepCopy() *CloudKittySpec { + if in == nil { + return nil + } + out := new(CloudKittySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittySpecBase) DeepCopyInto(out *CloudKittySpecBase) { + *out = *in + out.CloudKittyTemplate = in.CloudKittyTemplate if in.NodeSelector != nil { in, out := &in.NodeSelector, &out.NodeSelector *out = new(map[string]string) @@ -508,20 +1143,44 @@ func (in *CeilometerSpecCore) DeepCopyInto(out *CeilometerSpecCore) { *out = new(topologyv1beta1.TopoRef) **out = **in } + if in.PrometheusTLSCaCertSecret != nil { + in, out := &in.PrometheusTLSCaCertSecret, &out.PrometheusTLSCaCertSecret + *out = new(v1.SecretKeySelector) + (*in).DeepCopyInto(*out) + } + in.S3StorageConfig.DeepCopyInto(&out.S3StorageConfig) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerSpecCore. 
-func (in *CeilometerSpecCore) DeepCopy() *CeilometerSpecCore { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittySpecBase. +func (in *CloudKittySpecBase) DeepCopy() *CloudKittySpecBase { if in == nil { return nil } - out := new(CeilometerSpecCore) + out := new(CloudKittySpecBase) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CeilometerStatus) DeepCopyInto(out *CeilometerStatus) { +func (in *CloudKittySpecCore) DeepCopyInto(out *CloudKittySpecCore) { + *out = *in + in.CloudKittySpecBase.DeepCopyInto(&out.CloudKittySpecBase) + in.CloudKittyAPI.DeepCopyInto(&out.CloudKittyAPI) + in.CloudKittyProc.DeepCopyInto(&out.CloudKittyProc) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittySpecCore. +func (in *CloudKittySpecCore) DeepCopy() *CloudKittySpecCore { + if in == nil { + return nil + } + out := new(CloudKittySpecCore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyStatus) DeepCopyInto(out *CloudKittyStatus) { *out = *in if in.Hash != nil { in, out := &in.Hash, &out.Hash @@ -537,43 +1196,55 @@ func (in *CeilometerStatus) DeepCopyInto(out *CeilometerStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Networks != nil { - in, out := &in.Networks, &out.Networks - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.MysqldExporterHash != nil { - in, out := &in.MysqldExporterHash, &out.MysqldExporterHash - *out = make(map[string]string, len(*in)) + if in.APIEndpoints != nil { + in, out := &in.APIEndpoints, &out.APIEndpoints + *out = make(map[string]map[string]string, len(*in)) for key, val := range *in { - (*out)[key] = val + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal } } - if in.MysqldExporterExportedGaleras != nil { - in, out := &in.MysqldExporterExportedGaleras, &out.MysqldExporterExportedGaleras - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.KSMHash != nil { - in, out := &in.KSMHash, &out.KSMHash + if in.ServiceIDs != nil { + in, out := &in.ServiceIDs, &out.ServiceIDs *out = make(map[string]string, len(*in)) for key, val := range *in { (*out)[key] = val } } - if in.LastAppliedTopology != nil { - in, out := &in.LastAppliedTopology, &out.LastAppliedTopology - *out = new(topologyv1beta1.TopoRef) - **out = **in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyStatus. +func (in *CloudKittyStatus) DeepCopy() *CloudKittyStatus { + if in == nil { + return nil } + out := new(CloudKittyStatus) + in.DeepCopyInto(out) + return out } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CeilometerStatus. -func (in *CeilometerStatus) DeepCopy() *CeilometerStatus { +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudKittyTemplate) DeepCopyInto(out *CloudKittyTemplate) { + *out = *in + out.PasswordSelectors = in.PasswordSelectors +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudKittyTemplate. 
+func (in *CloudKittyTemplate) DeepCopy() *CloudKittyTemplate { if in == nil { return nil } - out := new(CeilometerStatus) + out := new(CloudKittyTemplate) in.DeepCopyInto(out) return out } @@ -899,6 +1570,78 @@ func (in *MonitoringStack) DeepCopy() *MonitoringStack { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageSchema) DeepCopyInto(out *ObjectStorageSchema) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSchema. +func (in *ObjectStorageSchema) DeepCopy() *ObjectStorageSchema { + if in == nil { + return nil + } + out := new(ObjectStorageSchema) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageSecretSpec) DeepCopyInto(out *ObjectStorageSecretSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSecretSpec. +func (in *ObjectStorageSecretSpec) DeepCopy() *ObjectStorageSecretSpec { + if in == nil { + return nil + } + out := new(ObjectStorageSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) { + *out = *in + if in.Schemas != nil { + in, out := &in.Schemas, &out.Schemas + *out = make([]ObjectStorageSchema, len(*in)) + copy(*out, *in) + } + out.Secret = in.Secret + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(ObjectStorageTLSSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec. +func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec { + if in == nil { + return nil + } + out := new(ObjectStorageSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageTLSSpec) DeepCopyInto(out *ObjectStorageTLSSpec) { + *out = *in + out.CASpec = in.CASpec +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageTLSSpec. +func (in *ObjectStorageTLSSpec) DeepCopy() *ObjectStorageTLSSpec { + if in == nil { + return nil + } + out := new(ObjectStorageTLSSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PasswordsSelector) DeepCopyInto(out *PasswordsSelector) { *out = *in @@ -1030,6 +1773,7 @@ func (in *TelemetrySpec) DeepCopyInto(out *TelemetrySpec) { in.TelemetrySpecBase.DeepCopyInto(&out.TelemetrySpecBase) in.Autoscaling.DeepCopyInto(&out.Autoscaling) in.Ceilometer.DeepCopyInto(&out.Ceilometer) + in.CloudKitty.DeepCopyInto(&out.CloudKitty) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetrySpec. @@ -1081,6 +1825,7 @@ func (in *TelemetrySpecCore) DeepCopyInto(out *TelemetrySpecCore) { in.TelemetrySpecBase.DeepCopyInto(&out.TelemetrySpecBase) in.Autoscaling.DeepCopyInto(&out.Autoscaling) in.Ceilometer.DeepCopyInto(&out.Ceilometer) + in.CloudKitty.DeepCopyInto(&out.CloudKitty) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TelemetrySpecCore. 
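The API changes above are easiest to read end to end from a small manifest. The sketch below is illustrative only: the cloudkitty, enabled and s3StorageConfig fields come from the CloudKittySection and CloudKittySpecBase types added in telemetry_types.go, the service password is looked up under the key named by passwordSelectors.cloudKittyService (CloudKittyPassword by default), and the CR name, namespace and secret name are assumptions that simply mirror the CI configuration further down in this patch.

apiVersion: telemetry.openstack.org/v1beta1
kind: Telemetry
metadata:
  name: telemetry          # assumed name, not defined by this patch
  namespace: openstack     # assumed namespace, matches the CI configuration below
spec:
  cloudkitty:
    enabled: true
    s3StorageConfig:
      secret:
        type: "s3"
        name: "cloudkitty-loki-s3"   # S3/MinIO credentials secret, as created by ci/deploy-loki-for-ck.yaml

When cloudkitty.enabled is true, the webhook defaulting added in telemetry_webhook.go fills in the CloudKittyAPI and CloudKittyProc container images from the RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT and RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT environment variables, and readiness is expected to be surfaced through the CloudKittyReadyCondition family introduced in conditions.go.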
diff --git a/ci/cloudkitty-pre_deploy-install_loki.yml b/ci/cloudkitty-pre_deploy-install_loki.yml new file mode 100644 index 00000000..347fe2eb --- /dev/null +++ b/ci/cloudkitty-pre_deploy-install_loki.yml @@ -0,0 +1,47 @@ +--- +- name: "Install loki for cloudkitty" + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + gather_facts: false + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + tasks: + - name: Set the loki-operator version to pin the version + ansible.builtin.set_fact: + loki_operator_version: "v6.3.0" + + - name: Deploy loki operator + ansible.builtin.shell: + cmd: | + oc apply -f {{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/deploy-loki-for-ck.yaml + + - name: Get and approve the installplan when the version is pinned + when: loki_operator_version is defined + block: + - name: Get the installplan from the loki-operator subscription + ansible.builtin.shell: + cmd: | + oc get installplan -n openshift-operators-redhat | grep "loki-operator.{{ loki_operator_version }}" | awk '{print $1}' + retries: 10 + delay: 10 + register: loki_installplan + until: loki_installplan.stdout_lines | length != 0 + + - name: Show the loki_installplan from oc get installplan + ansible.builtin.debug: + var: loki_installplan + + - name: Approve the installation + ansible.builtin.shell: + cmd: | + oc patch -n openshift-operators-redhat installplan {{ loki_installplan.stdout }} --type='json' -p='[{"op": "replace", "path": "/spec/approved", "value":true}]' + + - name: Wait for the resources to be available + ansible.builtin.shell: + cmd: | + oc get csv | grep loki-operator + ignore_errors: true + register: output + until: output.stdout_lines | length == 1 and "Succeeded" in output.stdout + retries: 30 + delay: 10 diff --git a/ci/configure-cloudkitty.yml b/ci/configure-cloudkitty.yml new file mode 100644 index 00000000..10287a0e --- /dev/null +++ b/ci/configure-cloudkitty.yml @@ -0,0 +1,44 @@ +--- +- name: "Create the kustomization for deploying CloudKitty" + hosts: "{{ cifmw_target_hook_host | default('localhost') }}" + gather_facts: false + environment: + KUBECONFIG: "{{ cifmw_openshift_kubeconfig }}" + PATH: "{{ cifmw_path }}" + tasks: + - name: Copy controlplane kustomization + ansible.builtin.copy: + dest: "{{ cifmw_basedir }}/artifacts/manifests/kustomizations/controlplane/90-kustomize-controlplane-cloudkitty.yaml" + content: |- + apiVersion: kustomize.config.k8s.io/v1beta1 + kind: Kustomization + namespace: openstack + patches: + - patch: |- + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: unused + spec: + # Set overall storage class so we don't need to increase the + # number of PVCs that install_yamls creates + # this is only applicable to crc-based jobs, it is not in + # openshift by default, but is included in the crc distribution + storageClass: crc-csi-hostpath-provisioner + telemetry: + enabled: true + template: + logging: + enabled: false + autoscaling: + enabled: false + cloudkitty: + enabled: true + s3StorageConfig: + secret: + type: "s3" + name: "cloudkitty-loki-s3" + metricStorage: + enabled: true + target: + kind: OpenStackControlPlane diff --git a/ci/deploy-loki-for-ck.yaml b/ci/deploy-loki-for-ck.yaml new file mode 100644 index 00000000..800b62ca --- /dev/null +++ b/ci/deploy-loki-for-ck.yaml @@ -0,0 +1,139 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-operators-redhat + labels: + name: 
openshift-operators-redhat +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: loki-operator + namespace: openshift-operators-redhat +spec: + upgradeStrategy: Default +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: loki-operator + namespace: openshift-operators-redhat +spec: + channel: stable-6.3 + installPlanApproval: Manual + startingCSV: loki-operator.v6.3.0 + name: loki-operator + source: redhat-operators + sourceNamespace: openshift-marketplace +--- +# Deploys a new Namespace for the MinIO Pod +apiVersion: v1 +kind: Namespace +metadata: + name: minio-dev # Change this value if you want a different namespace name + labels: + name: minio-dev # Change this value to match metadata.name +--- +# Deploys a new MinIO Pod into the metadata.namespace Kubernetes namespace +# +apiVersion: v1 +kind: Pod +metadata: + labels: + app: minio + name: minio + namespace: minio-dev # Change this value to match the namespace metadata.name +spec: + containers: + - name: minio + image: quay.io/minio/minio:latest + command: + - /bin/bash + - -c + - | + mkdir -p /data/loki && \ + minio server /data + env: + - name: MINIO_ACCESS_KEY + value: minio + - name: MINIO_SECRET_KEY + value: minio123 + volumeMounts: + - mountPath: /data + name: storage # Corresponds to the `spec.volumes` Persistent Volume + volumes: + - name: storage + persistentVolumeClaim: + claimName: minio-pvc +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: minio-pvc + namespace: minio-dev +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: crc-csi-hostpath-provisioner +--- +apiVersion: v1 +kind: Service +metadata: + name: minio + namespace: minio-dev +spec: + selector: + app: minio + ports: + - name: api + protocol: TCP + port: 9000 + - name: console + protocol: TCP + port: 9090 +--- +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: minio-console + namespace: minio-dev +spec: + host: console-minio-dev.apps-crc.testing + to: + kind: Service + name: minio + weight: 100 + port: + targetPort: console + wildcardPolicy: None +--- +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: minio-api + namespace: minio-dev +spec: + host: api-minio-dev.apps-crc.testing + to: + kind: Service + name: minio + weight: 100 + port: + targetPort: api + wildcardPolicy: None +--- +apiVersion: v1 +kind: Secret +metadata: + name: cloudkitty-loki-s3 + namespace: openstack +stringData: + access_key_id: minio + access_key_secret: minio123 + bucketnames: loki + endpoint: http://minio.minio-dev.svc.cluster.local:9000 + diff --git a/ci/vars-cloudkitty-tempest.yml b/ci/vars-cloudkitty-tempest.yml new file mode 100644 index 00000000..26263ee6 --- /dev/null +++ b/ci/vars-cloudkitty-tempest.yml @@ -0,0 +1,61 @@ +--- +cifmw_deploy_obs: true +cifmw_openshift_obs_definition: + apiVersion: operators.coreos.com/v1alpha1 + kind: Subscription + metadata: + name: observability-operator + namespace: openshift-operators + spec: + channel: stable + installPlanApproval: Automatic + name: cluster-observability-operator + source: redhat-operators + sourceNamespace: openshift-marketplace + +pre_deploy_kustomize_cloudkitty: + source: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/configure-cloudkitty.yml" + type: playbook + +pre_deploy_loki_setup: + source: "{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir 
}}/ci/cloudkitty-pre_deploy-install_loki.yml" + type: playbook +# test cloudkitty +cifmw_run_tests: true +cifmw_run_test_role: test_operator +# TODO: Consider switching to podified-master-centos10 for features that patch master +cifmw_test_operator_tempest_namespace: podified-antelope-centos9 +# cloudkitty tempest plugin is not part of the tempest rpm. +# https://review.rdoproject.org/cgit/openstack/tempest-distgit/tree/openstack-tempest.spec +# We need to add the cloudkitty-tempest-plugin package to RDO, same as TTTP +# https://review.rdoproject.org/cgit/openstack/telemetry-tempest-plugin-distgit/# +# For now, we can force install using the cifmw_test_operator_tempest_external_plugin below. +cifmw_test_operator_tempest_container: openstack-tempest-all +cifmw_test_operator_tempest_image_tag: 'current-podified' +# This value is used to populate the `tempestconfRun` parameter of the Tempest CR: https://openstack-k8s-operators.github.io/test-operator/crds.html#tempest-custom-resource +# https://github.com/openstack-k8s-operators/ci-framework/blob/main/roles/test_operator/defaults/main.yml +# TODO: Refine this tempest config +tempest_conf: + overrides: | + validation.run_validation true + identity.v3_endpoint_type public + service_available.ceilometer true + service_available.sg_core true + service_available.aodh true + service_available.cinder false + telemetry.sg_core_service_url "https://ceilometer-internal.openstack.svc.cluster.local:3000" + telemetry.prometheus_service_url "https://metric-storage-prometheus.openstack.svc.cluster.local:9090" + telemetry.ceilometer_polling_interval 120 + telemetry.prometheus_scrape_interval 30 + telemetry.alarm_threshold 50000000000 +cifmw_test_operator_tempest_tempestconf_config: "{{ tempest_conf }}" +cifmw_test_operator_tempest_include_list: | + ^tempest.*\[.*\bsmoke\b.*\] + cloudkitty_tempest_plugin.* + telemetry_tempest_plugin.* +# TODO: update this to allow multiple external plugins to be listed with Depends-On. +# Potentially, this can be done via the meta content provider, by adding the tempest images to the list. +external_plugin: "opendev.org/openstack/cloudkitty-tempest-plugin" +change_item: "{{ zuul['items'] | selectattr('project.canonical_name', 'equalto', external_plugin) }}" +# WORKAROUND: CloudKitty tempest is not packaged in RDO. Typically, the default would be [], since we would not require an external installation. 
+cifmw_test_operator_tempest_external_plugin: "{{ [ {'repository': 'https://' + external_plugin + '.git'} ] if change_item | length < 1 else [ { 'repository': 'https://' + external_plugin + '.git', 'changeRepository': 'https://review.' + external_plugin, 'changeRefspec': [ 'refs/changes', change_item[0].change[-2:], change_item[0].change, change_item[0].patchset ] | join('/') } ] }}" diff --git a/config/crd/bases/telemetry.openstack.org_autoscalings.yaml b/config/crd/bases/telemetry.openstack.org_autoscalings.yaml index 7c7d682b..6b6d7c53 100644 --- a/config/crd/bases/telemetry.openstack.org_autoscalings.yaml +++ b/config/crd/bases/telemetry.openstack.org_autoscalings.yaml @@ -293,6 +293,11 @@ spec: description: CeilometerService - Selector to get the ceilometer service password from the Secret type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string type: object preserveJobs: default: false diff --git a/config/crd/bases/telemetry.openstack.org_ceilometers.yaml b/config/crd/bases/telemetry.openstack.org_ceilometers.yaml index 99fd948a..68c4de18 100644 --- a/config/crd/bases/telemetry.openstack.org_ceilometers.yaml +++ b/config/crd/bases/telemetry.openstack.org_ceilometers.yaml @@ -209,6 +209,11 @@ spec: description: CeilometerService - Selector to get the ceilometer service password from the Secret type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string type: object proxyImage: type: string diff --git a/config/crd/bases/telemetry.openstack.org_cloudkitties.yaml b/config/crd/bases/telemetry.openstack.org_cloudkitties.yaml new file mode 100644 index 00000000..5b5b615e --- /dev/null +++ b/config/crd/bases/telemetry.openstack.org_cloudkitties.yaml @@ -0,0 +1,809 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: cloudkitties.telemetry.openstack.org +spec: + group: telemetry.openstack.org + names: + kind: CloudKitty + listKind: CloudKittyList + plural: cloudkitties + singular: cloudkitty + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CloudKitty is the Schema for the cloudkitties API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudKittySpec defines the desired state of CloudKitty + properties: + apiTimeout: + default: 60 + description: APITimeout for HAProxy, Apache, and rpc_response_timeout + type: integer + cloudKittyAPI: + description: CloudKittyAPI - Spec definition for the API service of + this CloudKitty deployment + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL (will + be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment + resource names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + override: + description: Override, provides the ability to override the generated + manifest of several child resources. + properties: + service: + additionalProperties: + description: |- + RoutedOverrideSpec - a routed service override configuration for the Service created to serve traffic + to the cluster. Allows for the manifest of the created Service to be overwritten with custom configuration. + properties: + endpointURL: + type: string + metadata: + description: |- + EmbeddedLabelsAnnotations is an embedded subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. + Only labels and annotations are included. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + type: object + spec: + description: |- + OverrideServiceSpec is a subset of the fields included in https://pkg.go.dev/k8s.io/api@v0.26.6/core/v1#ServiceSpec + Limited to Type, SessionAffinity, LoadBalancerSourceRanges, ExternalName, ExternalTrafficPolicy, SessionAffinityConfig, + IPFamilyPolicy, LoadBalancerClass and InternalTrafficPolicy + properties: + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. 
If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the + configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + description: |- + Override configuration for the Service created to serve traffic to the cluster. + The key must be the endpoint type (public, internal) + type: object + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. 
+ + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tls: + description: TLS - Parameters related to the TLS + properties: + api: + description: API tls type which encapsulates for API services + properties: + internal: + description: Internal GenericService - holds the secret + for the internal endpoint + properties: + secretName: + description: SecretName - holding the cert, key for + the service + type: string + type: object + public: + description: Public GenericService - holds the secret + for the public endpoint + properties: + secretName: + description: SecretName - holding the cert, key for + the service + type: string + type: object + type: object + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in + a pre-created bundle file + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. 
Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + cloudKittyProc: + description: CloudKittyProc - Spec definition for the Scheduler service + of this CloudKitty deployment + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL (will + be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment + resource names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tls: + description: TLS - Parameters related to the TLS + properties: + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in + a pre-created bundle file + type: string + secretName: + description: SecretName - holding the cert, key for the service + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config for all CloudKitty services using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + databaseAccount: + default: cloudkitty + description: DatabaseAccount - optional MariaDBAccount used for cloudkitty + DB, defaults to cloudkitty + type: string + databaseInstance: + default: openstack + description: |- + MariaDB instance name + Right now required by the maridb-operator to get the credentials from the instance to create the DB + Might not be required in future + type: string + lokiStackSize: + default: 1x.demo + description: Size of the LokiStack. Supported are "1x.demo" (default), + "1x.pico", "1x.extra-small", "1x.small", "1x.medium" + enum: + - "" + - 1x.demo + - 1x.pico + - 1x.extra-small + - 1x.small + - 1x.medium + type: string + memcachedInstance: + default: memcached + description: Memcached instance name. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting + NodeSelector here acts as a default value and can be overridden by service + specific NodeSelector Settings. 
+ type: object + passwordSelector: + default: + cloudKittyService: CloudKittyPassword + description: PasswordsSelectors - Selectors to identify the ServiceUser + password from the Secret + properties: + aodhService: + default: AodhPassword + description: AodhService - Selector to get the aodh service password + from the Secret + type: string + ceilometerService: + default: CeilometerPassword + description: CeilometerService - Selector to get the ceilometer + service password from the Secret + type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string + type: object + period: + default: 300 + description: Period for collecting metrics in seconds + format: int32 + type: integer + preserveJobs: + default: false + description: PreserveJobs - do not delete jobs after they finished + e.g. to check logs + type: boolean + prometheusHost: + description: Host of user deployed prometheus + type: string + prometheusPort: + description: Port of user deployed prometheus + format: int32 + maximum: 65535 + minimum: 1 + type: integer + prometheusTLSCaCertSecret: + description: If defined, specifies which CA certificate to use for + user deployed prometheus + nullable: true + properties: + key: + description: The key of the secret to select from. Must be a + valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + rabbitMqClusterName: + default: rabbitmq + description: |- + RabbitMQ instance name + Needed to request a transportURL that is created and used in CloudKitty + type: string + s3StorageConfig: + default: + secret: + name: cloudkitty-loki-s3 + type: s3 + description: S3 related configuration passed to Loki + properties: + schemas: + default: + - effectiveDate: "2020-10-11" + version: v11 + description: Schemas for reading and writing logs. + items: + properties: + effectiveDate: + description: |- + EffectiveDate contains a date in YYYY-MM-DD format which is interpreted in the UTC time zone. + + The configuration always needs at least one schema that is currently valid. This means that when creating a new + CloudKitty it is recommended to add a schema with the latest available version and an effective date of "yesterday". + New schema versions added to the configuration always needs to be placed "in the future", so that Loki can start + using it once the day rolls over. + type: string + version: + description: Version for writing and reading logs. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + secret: + description: |- + Secret for object storage authentication. + Name of a secret in the same namespace as the CloudKitty custom resource. + properties: + credentialMode: + description: |- + CredentialMode can be used to set the desired credential mode for authenticating with the object storage. + If this is not set, then the operator tries to infer the credential mode from the provided secret and its + own configuration. 
+ type: string + name: + description: Name of a secret in the namespace configured + for object storage secrets. + type: string + type: + description: Type of object storage that should be used + type: string + type: object + tls: + description: TLS configuration for reaching the object storage + endpoint. + properties: + caKey: + description: |- + Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the CloudKitty custom resource. + If empty, it defaults to "service-ca.crt". + type: string + caName: + description: |- + CA is the name of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the CloudKitty custom resource. + type: string + type: object + type: object + secret: + default: osp-secret + description: Secret containing OpenStack password information + type: string + serviceUser: + default: cloudkitty + description: ServiceUser - optional username used for this service + to register in cloudkitty + type: string + storageClass: + description: Storage class used for Loki + type: string + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + status: + description: CloudKittyStatus defines the observed state of CloudKitty + properties: + apiEndpoints: + additionalProperties: + additionalProperties: + type: string + type: object + description: API endpoints + type: object + cloudKittyAPIReadyCount: + default: 0 + description: ReadyCount of CloudKitty API instance + format: int32 + minimum: 0 + type: integer + cloudKittyProcReadyCounts: + default: 0 + description: ReadyCount of CloudKitty Processor instances + format: int32 + minimum: 0 + type: integer + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: |- + Severity provides a classification of Reason code, so the current situation is immediately + understandable and could act accordingly. + It is meant for situations where Status=False and it should be indicated if it is just + informational, warning (next reconciliation might fix it) or an error (e.g. DB create issue + and no actions to automatically resolve the issue can/should be done). + For conditions where Status=Unknown or Status=True the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. 
+ type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + databaseHostname: + description: CloudKitty Database Hostname + type: string + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + observedGeneration: + description: |- + ObservedGeneration - the most recent generation observed for this service. + If the observed generation is different than the spec generation, then the + controller has not started processing the latest changes, and the status + and its conditions are likely stale. + format: int64 + type: integer + prometheusHostname: + description: PrometheusHost - Hostname for prometheus used for autoscaling + type: string + prometheusPort: + description: PrometheusPort - Port for prometheus used for autoscaling + format: int32 + type: integer + prometheusTLS: + description: PrometheusTLS - Determines if TLS should be used for + accessing prometheus + type: boolean + serviceIDs: + additionalProperties: + type: string + description: ServiceIDs + type: object + transportURLSecret: + description: TransportURLSecret - Secret containing RabbitMQ transportURL + type: string + required: + - cloudKittyAPIReadyCount + - cloudKittyProcReadyCounts + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/telemetry.openstack.org_cloudkittyapis.yaml b/config/crd/bases/telemetry.openstack.org_cloudkittyapis.yaml new file mode 100644 index 00000000..9cf9619d --- /dev/null +++ b/config/crd/bases/telemetry.openstack.org_cloudkittyapis.yaml @@ -0,0 +1,500 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: cloudkittyapis.telemetry.openstack.org +spec: + group: telemetry.openstack.org + names: + kind: CloudKittyAPI + listKind: CloudKittyAPIList + plural: cloudkittyapis + singular: cloudkittyapi + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: CloudKittyAPI is the Schema for the cloudkittyapis API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudKittyAPISpec defines the desired state of CloudKittyAPI + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL (will + be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. 
+ type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + databaseAccount: + default: cloudkitty + description: DatabaseAccount - optional MariaDBAccount used for cloudkitty + DB, defaults to cloudkitty + type: string + databaseHostname: + description: DatabaseHostname - CloudKitty Database Hostname + type: string + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment resource + names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + override: + description: Override, provides the ability to override the generated + manifest of several child resources. + properties: + service: + additionalProperties: + description: |- + RoutedOverrideSpec - a routed service override configuration for the Service created to serve traffic + to the cluster. Allows for the manifest of the created Service to be overwritten with custom configuration. + properties: + endpointURL: + type: string + metadata: + description: |- + EmbeddedLabelsAnnotations is an embedded subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. + Only labels and annotations are included. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + type: object + spec: + description: |- + OverrideServiceSpec is a subset of the fields included in https://pkg.go.dev/k8s.io/api@v0.26.6/core/v1#ServiceSpec + Limited to Type, SessionAffinity, LoadBalancerSourceRanges, ExternalName, ExternalTrafficPolicy, SessionAffinityConfig, + IPFamilyPolicy, LoadBalancerClass and InternalTrafficPolicy + properties: + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). 
If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. 
+ Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + description: |- + Override configuration for the Service created to serve traffic to the cluster. + The key must be the endpoint type (public, internal) + type: object + type: object + passwordSelector: + default: + cloudKittyService: CloudKittyPassword + description: PasswordsSelectors - Selectors to identify the ServiceUser + password from the Secret + properties: + aodhService: + default: AodhPassword + description: AodhService - Selector to get the aodh service password + from the Secret + type: string + ceilometerService: + default: CeilometerPassword + description: CeilometerService - Selector to get the ceilometer + service password from the Secret + type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + secret: + default: osp-secret + description: Secret containing OpenStack password information + type: string + serviceAccount: + description: ServiceAccount - service account name used internally + to provide CloudKitty services the default SA name + type: string + serviceUser: + default: cloudkitty + description: ServiceUser - optional username used for this service + to register in cloudkitty + type: string + tls: + description: TLS - Parameters related to the TLS + properties: + api: + description: API tls type which encapsulates for API services + properties: + internal: + description: Internal GenericService - holds the secret for + the internal endpoint + properties: + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + public: + description: Public GenericService - holds the secret for + the public endpoint + properties: + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + type: object + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in a pre-created + bundle file + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. 
Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + transportURLSecret: + description: Secret containing RabbitMq transport URL + type: string + type: object + status: + description: CloudKittyAPIStatus defines the observed state of CloudKittyAPI + properties: + apiEndpoints: + additionalProperties: + additionalProperties: + type: string + type: object + description: API endpoints + type: object + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: |- + Severity provides a classification of Reason code, so the current situation is immediately + understandable and could act accordingly. + It is meant for situations where Status=False and it should be indicated if it is just + informational, warning (next reconciliation might fix it) or an error (e.g. DB create issue + and no actions to automatically resolve the issue can/should be done). + For conditions where Status=Unknown or Status=True the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + lastAppliedTopology: + description: LastAppliedTopology - the last applied Topology + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + observedGeneration: + description: |- + ObservedGeneration - the most recent generation observed for this service. + If the observed generation is different than the spec generation, then the + controller has not started processing the latest changes, and the status + and its conditions are likely stale. 
+ format: int64 + type: integer + readyCount: + default: 0 + description: ReadyCount of CloudKitty API instances + format: int32 + minimum: 0 + type: integer + serviceIDs: + additionalProperties: + type: string + description: ServiceIDs + type: object + required: + - readyCount + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/telemetry.openstack.org_cloudkittyprocs.yaml b/config/crd/bases/telemetry.openstack.org_cloudkittyprocs.yaml new file mode 100644 index 00000000..1caddbd5 --- /dev/null +++ b/config/crd/bases/telemetry.openstack.org_cloudkittyprocs.yaml @@ -0,0 +1,325 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: cloudkittyprocs.telemetry.openstack.org +spec: + group: telemetry.openstack.org + names: + kind: CloudKittyProc + listKind: CloudKittyProcList + plural: cloudkittyprocs + singular: cloudkittyproc + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: NetworkAttachments + jsonPath: .status.networkAttachments + name: NetworkAttachments + type: string + - description: Status + jsonPath: .status.conditions[0].status + name: Status + type: string + - description: Message + jsonPath: .status.conditions[0].message + name: Message + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: CloudKittyProc is the Schema for the cloudkittprocs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: CloudKittyProcSpec defines the desired state of CloudKitty + Processor + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL (will + be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + databaseAccount: + default: cloudkitty + description: DatabaseAccount - optional MariaDBAccount used for cloudkitty + DB, defaults to cloudkitty + type: string + databaseHostname: + description: DatabaseHostname - CloudKitty Database Hostname + type: string + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment resource + names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + passwordSelector: + default: + cloudKittyService: CloudKittyPassword + description: PasswordsSelectors - Selectors to identify the ServiceUser + password from the Secret + properties: + aodhService: + default: AodhPassword + description: AodhService - Selector to get the aodh service password + from the Secret + type: string + ceilometerService: + default: CeilometerPassword + description: CeilometerService - Selector to get the ceilometer + service password from the Secret + type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. 
+ If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + secret: + default: osp-secret + description: Secret containing OpenStack password information + type: string + serviceAccount: + description: ServiceAccount - service account name used internally + to provide CloudKitty services the default SA name + type: string + serviceUser: + default: cloudkitty + description: ServiceUser - optional username used for this service + to register in cloudkitty + type: string + tls: + description: TLS - Parameters related to the TLS + properties: + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in a pre-created + bundle file + type: string + secretName: + description: SecretName - holding the cert, key for the service + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + transportURLSecret: + description: Secret containing RabbitMq transport URL + type: string + type: object + status: + description: CloudKittyProcStatus defines the observed state of CloudKitty + Processor + properties: + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: |- + Severity provides a classification of Reason code, so the current situation is immediately + understandable and could act accordingly. + It is meant for situations where Status=False and it should be indicated if it is just + informational, warning (next reconciliation might fix it) or an error (e.g. DB create issue + and no actions to automatically resolve the issue can/should be done). + For conditions where Status=Unknown or Status=True the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. 
job status + type: object + lastAppliedTopology: + description: LastAppliedTopology - the last applied Topology + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + observedGeneration: + description: |- + ObservedGeneration - the most recent generation observed for this service. + If the observed generation is different than the spec generation, then the + controller has not started processing the latest changes, and the status + and its conditions are likely stale. + format: int64 + type: integer + readyCount: + default: 0 + description: ReadyCount of CloudKitty Processor instances + format: int32 + minimum: 0 + type: integer + required: + - readyCount + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/telemetry.openstack.org_telemetries.yaml b/config/crd/bases/telemetry.openstack.org_telemetries.yaml index 11cf9354..e48a8056 100644 --- a/config/crd/bases/telemetry.openstack.org_telemetries.yaml +++ b/config/crd/bases/telemetry.openstack.org_telemetries.yaml @@ -296,6 +296,11 @@ spec: description: CeilometerService - Selector to get the ceilometer service password from the Secret type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string type: object preserveJobs: default: false @@ -526,6 +531,11 @@ spec: description: CeilometerService - Selector to get the ceilometer service password from the Secret type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string type: object proxyImage: type: string @@ -585,6 +595,677 @@ spec: - secret - sgCoreImage type: object + cloudkitty: + description: CloudKitty - Parameters related to the cloudkitty service + properties: + apiTimeout: + default: 60 + description: APITimeout for HAProxy, Apache, and rpc_response_timeout + type: integer + cloudKittyAPI: + description: CloudKittyAPI - Spec definition for the API service + of this CloudKitty deployment + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL + (will be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment + resource names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. + type: object + override: + description: Override, provides the ability to override the + generated manifest of several child resources. + properties: + service: + additionalProperties: + description: |- + RoutedOverrideSpec - a routed service override configuration for the Service created to serve traffic + to the cluster. Allows for the manifest of the created Service to be overwritten with custom configuration. + properties: + endpointURL: + type: string + metadata: + description: |- + EmbeddedLabelsAnnotations is an embedded subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. + Only labels and annotations are included. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + type: object + type: object + spec: + description: |- + OverrideServiceSpec is a subset of the fields included in https://pkg.go.dev/k8s.io/api@v0.26.6/core/v1#ServiceSpec + Limited to Type, SessionAffinity, LoadBalancerSourceRanges, ExternalName, ExternalTrafficPolicy, SessionAffinityConfig, + IPFamilyPolicy, LoadBalancerClass and InternalTrafficPolicy + properties: + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). 
Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains + the configurations of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. 
+ properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + description: |- + Override configuration for the Service created to serve traffic to the cluster. + The key must be the endpoint type (public, internal) + type: object + type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tls: + description: TLS - Parameters related to the TLS + properties: + api: + description: API tls type which encapsulates for API services + properties: + internal: + description: Internal GenericService - holds the secret + for the internal endpoint + properties: + secretName: + description: SecretName - holding the cert, key + for the service + type: string + type: object + public: + description: Public GenericService - holds the secret + for the public endpoint + properties: + secretName: + description: SecretName - holding the cert, key + for the service + type: string + type: object + type: object + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs + in a pre-created bundle file + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + cloudKittyProc: + description: CloudKittyProc - Spec definition for the Scheduler + service of this CloudKitty deployment + properties: + containerImage: + description: ContainerImage - CloudKitty Container Image URL + (will be set to environmental default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + customServiceConfigSecrets: + description: |- + CustomServiceConfigSecrets - customize the service config using this parameter to specify Secrets + that contain sensitive service config data. The content of each Secret gets added to the + /etc//.conf.d directory as a custom config file. + items: + type: string + type: array + x-kubernetes-list-type: atomic + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment + resource names to expose the services to the given network + items: + type: string + type: array + x-kubernetes-list-type: atomic + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting here overrides + any global NodeSelector settings within the CloudKitty CR. 
+ type: object + replicas: + default: 1 + description: Replicas - CloudKitty API Replicas + format: int32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + tls: + description: TLS - Parameters related to the TLS + properties: + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs + in a pre-created bundle file + type: string + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config for all CloudKitty services using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. 
The content gets added to + to /etc//.conf.d directory as a custom config file. + type: string + databaseAccount: + default: cloudkitty + description: DatabaseAccount - optional MariaDBAccount used for + cloudkitty DB, defaults to cloudkitty + type: string + databaseInstance: + default: openstack + description: |- + MariaDB instance name + Right now required by the maridb-operator to get the credentials from the instance to create the DB + Might not be required in future + type: string + enabled: + default: false + description: Enabled - Whether OpenStack CloudKitty service should + be deployed and managed + type: boolean + lokiStackSize: + default: 1x.demo + description: Size of the LokiStack. Supported are "1x.demo" (default), + "1x.pico", "1x.extra-small", "1x.small", "1x.medium" + enum: + - "" + - 1x.demo + - 1x.pico + - 1x.extra-small + - 1x.small + - 1x.medium + type: string + memcachedInstance: + default: memcached + description: Memcached instance name. + type: string + nodeSelector: + additionalProperties: + type: string + description: |- + NodeSelector to target subset of worker nodes running this service. Setting + NodeSelector here acts as a default value and can be overridden by service + specific NodeSelector Settings. + type: object + passwordSelector: + default: + cloudKittyService: CloudKittyPassword + description: PasswordsSelectors - Selectors to identify the ServiceUser + password from the Secret + properties: + aodhService: + default: AodhPassword + description: AodhService - Selector to get the aodh service + password from the Secret + type: string + ceilometerService: + default: CeilometerPassword + description: CeilometerService - Selector to get the ceilometer + service password from the Secret + type: string + cloudKittyService: + default: CloudKittyPassword + description: CloudKittyService - Selector to get the CloudKitty + service password from the Secret + type: string + type: object + period: + default: 300 + description: Period for collecting metrics in seconds + format: int32 + type: integer + preserveJobs: + default: false + description: PreserveJobs - do not delete jobs after they finished + e.g. to check logs + type: boolean + prometheusHost: + description: Host of user deployed prometheus + type: string + prometheusPort: + description: Port of user deployed prometheus + format: int32 + maximum: 65535 + minimum: 1 + type: integer + prometheusTLSCaCertSecret: + description: If defined, specifies which CA certificate to use + for user deployed prometheus + nullable: true + properties: + key: + description: The key of the secret to select from. Must be + a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + rabbitMqClusterName: + default: rabbitmq + description: |- + RabbitMQ instance name + Needed to request a transportURL that is created and used in CloudKitty + type: string + s3StorageConfig: + default: + secret: + name: cloudkitty-loki-s3 + type: s3 + description: S3 related configuration passed to Loki + properties: + schemas: + default: + - effectiveDate: "2020-10-11" + version: v11 + description: Schemas for reading and writing logs. + items: + properties: + effectiveDate: + description: |- + EffectiveDate contains a date in YYYY-MM-DD format which is interpreted in the UTC time zone. + + The configuration always needs at least one schema that is currently valid. This means that when creating a new + CloudKitty it is recommended to add a schema with the latest available version and an effective date of "yesterday". + New schema versions added to the configuration always needs to be placed "in the future", so that Loki can start + using it once the day rolls over. + type: string + version: + description: Version for writing and reading logs. + type: string + type: object + minItems: 1 + type: array + x-kubernetes-list-type: atomic + secret: + description: |- + Secret for object storage authentication. + Name of a secret in the same namespace as the CloudKitty custom resource. + properties: + credentialMode: + description: |- + CredentialMode can be used to set the desired credential mode for authenticating with the object storage. + If this is not set, then the operator tries to infer the credential mode from the provided secret and its + own configuration. + type: string + name: + description: Name of a secret in the namespace configured + for object storage secrets. + type: string + type: + description: Type of object storage that should be used + type: string + type: object + tls: + description: TLS configuration for reaching the object storage + endpoint. + properties: + caKey: + description: |- + Key is the data key of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the CloudKitty custom resource. + If empty, it defaults to "service-ca.crt". + type: string + caName: + description: |- + CA is the name of a ConfigMap containing a CA certificate. + It needs to be in the same namespace as the CloudKitty custom resource. + type: string + type: object + type: object + secret: + default: osp-secret + description: Secret containing OpenStack password information + type: string + serviceUser: + default: cloudkitty + description: ServiceUser - optional username used for this service + to register in cloudkitty + type: string + storageClass: + description: Storage class used for Loki + type: string + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service + references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. 
Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + type: object logging: description: Logging - Parameters related to the logging properties: diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index b9d96f2b..a3ca37be 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -7,25 +7,34 @@ resources: - bases/telemetry.openstack.org_ceilometers.yaml - bases/telemetry.openstack.org_loggings.yaml - bases/telemetry.openstack.org_metricstorages.yaml +- bases/telemetry.openstack.org_cloudkittyapis.yaml +- bases/telemetry.openstack.org_cloudkittyprocs.yaml +- bases/telemetry.openstack.org_cloudkitties.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. # patches here are for enabling the conversion webhook for each CRD -#- path: patches/webhook_in_telemetries.yaml -#- path: patches/webhook_in_ceilometers.yaml -#- path: patches/webhook_in_autoscalings.yaml -#- path: patches/webhook_in_loggings.yaml -#- path: patches/webhook_in_metricstorages.yaml +#- patches/webhook_in_telemetries.yaml +#- patches/webhook_in_ceilometers.yaml +#- patches/webhook_in_autoscalings.yaml +#- patches/webhook_in_loggings.yaml +#- patches/webhook_in_metricstorages.yaml +#- patches/webhook_in_cloudkittyapis.yaml +#- patches/webhook_in_cloudkittyprocs.yaml +#- patches/webhook_in_cloudkitties.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD -#- path: patches/cainjection_in_telemetries.yaml -#- path: patches/cainjection_in_ceilometers.yaml -#- path: patches/cainjection_in_autoscalings.yaml -#- path: patches/cainjection_in_loggings.yaml -#- path: patches/cainjection_in_metricstorages.yaml +#- patches/cainjection_in_telemetries.yaml +#- patches/cainjection_in_ceilometers.yaml +#- patches/cainjection_in_autoscalings.yaml +#- patches/cainjection_in_loggings.yaml +#- patches/cainjection_in_metricstorages.yaml +#- patches/cainjection_in_cloudkittyapis.yaml +#- patches/cainjection_in_cloudkittyprocs.yaml +#- patches/cainjection_in_cloudkitties.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
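For reference, the new spec.cloudkitty section of the Telemetry CRD above could be exercised with a minimal Telemetry CR such as the one below. This is only a sketch built from the fields and defaults defined in the schema added by this change; the storage class value and the contents of the referenced secrets are illustrative assumptions, not part of the diff.

apiVersion: telemetry.openstack.org/v1beta1
kind: Telemetry
metadata:
  name: telemetry
spec:
  cloudkitty:
    enabled: true                  # default is false, so rating has to be opted into
    databaseInstance: openstack    # CRD default
    memcachedInstance: memcached   # CRD default
    rabbitMqClusterName: rabbitmq  # CRD default; used to request the TransportURL
    secret: osp-secret             # must carry the key named by passwordSelector.cloudKittyService (CloudKittyPassword)
    lokiStackSize: 1x.demo         # one of: 1x.demo, 1x.pico, 1x.extra-small, 1x.small, 1x.medium
    storageClass: local-storage    # illustrative; storage class handed to the LokiStack
    s3StorageConfig:
      type: s3
      secret:
        name: cloudkitty-loki-s3   # CRD default; object-storage credentials in the same namespace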
diff --git a/config/crd/patches/cainjection_in_cloudkitties.yaml b/config/crd/patches/cainjection_in_cloudkitties.yaml new file mode 100644 index 00000000..b0360335 --- /dev/null +++ b/config/crd/patches/cainjection_in_cloudkitties.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: cloudkitties.telemetry.openstack.org diff --git a/config/crd/patches/cainjection_in_cloudkittyapis.yaml b/config/crd/patches/cainjection_in_cloudkittyapis.yaml new file mode 100644 index 00000000..35526ad6 --- /dev/null +++ b/config/crd/patches/cainjection_in_cloudkittyapis.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: cloudkittyapis.telemetry.openstack.org diff --git a/config/crd/patches/cainjection_in_cloudkittyprocs.yaml b/config/crd/patches/cainjection_in_cloudkittyprocs.yaml new file mode 100644 index 00000000..bfc383ec --- /dev/null +++ b/config/crd/patches/cainjection_in_cloudkittyprocs.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: cloudkittyprocs.telemetry.openstack.org diff --git a/config/crd/patches/webhook_in_cloudkitties.yaml b/config/crd/patches/webhook_in_cloudkitties.yaml new file mode 100644 index 00000000..18a2c841 --- /dev/null +++ b/config/crd/patches/webhook_in_cloudkitties.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cloudkitties.telemetry.openstack.org +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_cloudkittyapis.yaml b/config/crd/patches/webhook_in_cloudkittyapis.yaml new file mode 100644 index 00000000..cad85de9 --- /dev/null +++ b/config/crd/patches/webhook_in_cloudkittyapis.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cloudkittyapis.telemetry.openstack.org +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_cloudkittyprocs.yaml b/config/crd/patches/webhook_in_cloudkittyprocs.yaml new file mode 100644 index 00000000..90c0d18e --- /dev/null +++ b/config/crd/patches/webhook_in_cloudkittyprocs.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: cloudkittyprocs.telemetry.openstack.org +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git 
a/config/rbac/cloudkitty_editor_role.yaml b/config/rbac/cloudkitty_editor_role.yaml new file mode 100644 index 00000000..940f3121 --- /dev/null +++ b/config/rbac/cloudkitty_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit cloudkitties. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: cloudkitty-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: telemetry-operator + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + name: cloudkitty-editor-role +rules: +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkitties + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkitties/status + verbs: + - get diff --git a/config/rbac/cloudkitty_viewer_role.yaml b/config/rbac/cloudkitty_viewer_role.yaml new file mode 100644 index 00000000..253ecd75 --- /dev/null +++ b/config/rbac/cloudkitty_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view cloudkitties. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: cloudkitty-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: telemetry-operator + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + name: cloudkitty-viewer-role +rules: +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkitties + verbs: + - get + - list + - watch +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkitties/status + verbs: + - get diff --git a/config/rbac/cloudkittyapi_editor_role.yaml b/config/rbac/cloudkittyapi_editor_role.yaml new file mode 100644 index 00000000..2a0e0027 --- /dev/null +++ b/config/rbac/cloudkittyapi_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit cloudkittyapis. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: cloudkittyapi-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: telemetry-operator + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + name: cloudkittyapi-editor-role +rules: +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkittyapis + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkittyapis/status + verbs: + - get diff --git a/config/rbac/cloudkittyapi_viewer_role.yaml b/config/rbac/cloudkittyapi_viewer_role.yaml new file mode 100644 index 00000000..56bc7181 --- /dev/null +++ b/config/rbac/cloudkittyapi_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view cloudkittyapis. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: cloudkittyapi-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: telemetry-operator + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + name: cloudkittyapi-viewer-role +rules: +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkittyapis + verbs: + - get + - list + - watch +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkittyapis/status + verbs: + - get diff --git a/config/rbac/cloudkittyproc_editor_role.yaml b/config/rbac/cloudkittyproc_editor_role.yaml new file mode 100644 index 00000000..dcb4eac3 --- /dev/null +++ b/config/rbac/cloudkittyproc_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit cloudkittyprocs. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: cloudkittyproc-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: telemetry-operator + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + name: cloudkittyproc-editor-role +rules: +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkittyprocs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkittyprocs/status + verbs: + - get diff --git a/config/rbac/cloudkittyproc_viewer_role.yaml b/config/rbac/cloudkittyproc_viewer_role.yaml new file mode 100644 index 00000000..a852a6f4 --- /dev/null +++ b/config/rbac/cloudkittyproc_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view cloudkittyprocs. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: cloudkittyproc-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: telemetry-operator + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + name: cloudkittyproc-viewer-role +rules: +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkittyprocs + verbs: + - get + - list + - watch +- apiGroups: + - telemetry.openstack.org + resources: + - cloudkittyprocs/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 7ef77f99..d3a928c9 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -61,6 +61,53 @@ rules: - patch - update - watch +- apiGroups: + - cert-manager.io + resources: + - certificates + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cert-manager.io + resources: + - issuers + verbs: + - get + - list + - watch +- apiGroups: + - cloudkitty.openstack.org + resources: + - cloudkittyprocs + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - cloudkitty.openstack.org + resources: + - cloudkittyprocs/finalizers + verbs: + - patch + - update +- apiGroups: + - cloudkitty.openstack.org + resources: + - cloudkittyprocs/status + verbs: + - get + - patch + - update - apiGroups: - heat.openstack.org resources: @@ -98,6 +145,18 @@ rules: - patch - update - watch +- apiGroups: + - loki.grafana.com + resources: + - lokistacks + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - mariadb.openstack.org resources: @@ -235,11 +294,23 @@ rules: - securitycontextconstraints verbs: - use +- apiGroups: + - security.openshift.io + resourceNames: + - anyuid + - privileged + resources: + - securitycontextconstraints + verbs: + - use - apiGroups: - telemetry.openstack.org resources: - autoscalings - ceilometers + - cloudkitties + - cloudkittyapis + - cloudkittyprocs - loggings - metricstorages - telemetries @@ -256,6 +327,7 @@ rules: resources: - autoscalings/finalizers - ceilometers/finalizers + - cloudkitties/finalizers - loggings/finalizers - metricstorages/finalizers verbs: @@ -267,6 +339,9 @@ rules: resources: - autoscalings/status - ceilometers/status + - cloudkitties/status + - cloudkittyapis/status + - cloudkittyprocs/status - loggings/status - metricstorages/status - telemetries/status @@ -277,6 +352,8 @@ rules: - apiGroups: - telemetry.openstack.org resources: + - cloudkittyapis/finalizers + - cloudkittyprocs/finalizers - telemetries/finalizers verbs: - patch diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 7e00a20f..6e689eee 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -5,4 +5,7 @@ resources: - telemetry_v1beta1_autoscaling.yaml - telemetry_v1beta1_logging.yaml - telemetry_v1beta1_metricstorage.yaml +- telemetry_v1beta1_cloudkittyapi.yaml +- telemetry_v1beta1_cloudkittyproc.yaml +- telemetry_v1beta1_cloudkitty.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/telemetry_v1beta1_cloudkitty.yaml b/config/samples/telemetry_v1beta1_cloudkitty.yaml new file mode 100644 index 00000000..0f649ad3 --- /dev/null +++ b/config/samples/telemetry_v1beta1_cloudkitty.yaml @@ -0,0 +1,12 @@ +apiVersion: telemetry.openstack.org/v1beta1 +kind: CloudKitty +metadata: + labels: + app.kubernetes.io/name: cloudkitty + 
app.kubernetes.io/instance: cloudkitty-sample + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: telemetry-operator + name: cloudkitty-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/telemetry_v1beta1_cloudkittyapi.yaml b/config/samples/telemetry_v1beta1_cloudkittyapi.yaml new file mode 100644 index 00000000..47725bf7 --- /dev/null +++ b/config/samples/telemetry_v1beta1_cloudkittyapi.yaml @@ -0,0 +1,12 @@ +apiVersion: telemetry.openstack.org/v1beta1 +kind: CloudKittyApi +metadata: + labels: + app.kubernetes.io/name: cloudkittyapi + app.kubernetes.io/instance: cloudkittyapi-sample + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: telemetry-operator + name: cloudkittyapi-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/telemetry_v1beta1_cloudkittyproc.yaml b/config/samples/telemetry_v1beta1_cloudkittyproc.yaml new file mode 100644 index 00000000..94e90340 --- /dev/null +++ b/config/samples/telemetry_v1beta1_cloudkittyproc.yaml @@ -0,0 +1,12 @@ +apiVersion: telemetry.openstack.org/v1beta1 +kind: CloudKittyProc +metadata: + labels: + app.kubernetes.io/name: cloudkittyproc + app.kubernetes.io/instance: cloudkittyproc-sample + app.kubernetes.io/part-of: telemetry-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: telemetry-operator + name: cloudkittyproc-sample +spec: + # TODO(user): Add fields here diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 288d1e6f..c6b4c02c 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -44,6 +44,26 @@ webhooks: resources: - ceilometers sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-telemetry-openstack-org-v1beta1-cloudkitty + failurePolicy: Fail + name: mcloudkitty.kb.io + rules: + - apiGroups: + - telemetry.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - cloudkitties + sideEffects: None - admissionReviewVersions: - v1 clientConfig: @@ -130,6 +150,26 @@ webhooks: resources: - ceilometers sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-telemetry-openstack-org-v1beta1-cloudkitty + failurePolicy: Fail + name: vcloudkitty.kb.io + rules: + - apiGroups: + - telemetry.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - cloudkitties + sideEffects: None - admissionReviewVersions: - v1 clientConfig: diff --git a/controllers/cloudkitty_controller.go b/controllers/cloudkitty_controller.go new file mode 100644 index 00000000..64b10a1f --- /dev/null +++ b/controllers/cloudkitty_controller.go @@ -0,0 +1,1480 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + "slices" + "strconv" + "time" + + lokistackv1 "github.com/grafana/loki/operator/api/loki/v1" + + "github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkitty" + "github.com/openstack-k8s-operators/telemetry-operator/pkg/metricstorage" + "github.com/openstack-k8s-operators/telemetry-operator/pkg/utils" + + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + certmgrv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + "github.com/go-logr/logr" + networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + memcachedv1 "github.com/openstack-k8s-operators/infra-operator/apis/memcached/v1beta1" + rabbitmqv1 "github.com/openstack-k8s-operators/infra-operator/apis/rabbitmq/v1beta1" + keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/certmanager" + "github.com/openstack-k8s-operators/lib-common/modules/common" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/configmap" + "github.com/openstack-k8s-operators/lib-common/modules/common/endpoint" + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/job" + "github.com/openstack-k8s-operators/lib-common/modules/common/labels" + nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" + common_rbac "github.com/openstack-k8s-operators/lib-common/modules/common/rbac" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" + telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetClient - +func (r *CloudKittyReconciler) GetClient() client.Client { + return r.Client +} + +// GetKClient - +func (r *CloudKittyReconciler) GetKClient() kubernetes.Interface { + return r.Kclient +} + +// GetScheme - +func (r *CloudKittyReconciler) GetScheme() *runtime.Scheme { + return r.Scheme +} + +// CloudKittyReconciler reconciles a CloudKitty object +type CloudKittyReconciler utils.ConditionalWatchingReconciler + +// GetLogger returns a logger object with a logging prefix of "controller.name" and additional controller context fields +func (r *CloudKittyReconciler) GetLogger(ctx context.Context) logr.Logger { + return log.FromContext(ctx).WithName("Controllers").WithName("CloudKitty") +} + +// 
+kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkitties,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkitties/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkitties/finalizers,verbs=update;patch +// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyapis,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyapis/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyapis/finalizers,verbs=update;patch +// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyprocs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyprocs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyprocs/finalizers,verbs=update;patch +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;create;update;patch;delete;watch +// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;create;update;patch;delete;watch +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;create;update;patch;delete;watch +// +kubebuilder:rbac:groups=mariadb.openstack.org,resources=mariadbdatabases,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mariadb.openstack.org,resources=mariadbaccounts,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=mariadb.openstack.org,resources=mariadbaccounts/finalizers,verbs=update;patch +// +kubebuilder:rbac:groups=memcached.openstack.org,resources=memcacheds,verbs=get;list;watch; +// +kubebuilder:rbac:groups=keystone.openstack.org,resources=keystoneapis,verbs=get;list;watch +// +kubebuilder:rbac:groups=rabbitmq.openstack.org,resources=transporturls,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch +// +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=cert-manager.io,resources=issuers,verbs=get;list;watch +// +kubebuilder:rbac:groups=loki.grafana.com,resources=lokistacks,verbs=get;list;watch;create;update;patch;delete + +// service account, role, rolebinding +// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update;patch +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles,verbs=get;list;watch;create;update;patch +// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=rolebindings,verbs=get;list;watch;create;update;patch +// service account permissions that are needed to grant permission to the above +// +kubebuilder:rbac:groups="security.openshift.io",resourceNames=anyuid;privileged,resources=securitycontextconstraints,verbs=use +// +kubebuilder:rbac:groups="",resources=pods,verbs=create;delete;get;list;patch;update;watch + +// Reconcile - +func (r *CloudKittyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + Log := r.GetLogger(ctx) + + // Fetch the CloudKitty instance + instance := &telemetryv1.CloudKitty{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. 
+ // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. Return and don't requeue. + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + Log.Error(err, fmt.Sprintf("could not fetch CloudKitty instance %s", instance.Name)) + return ctrl.Result{}, err + } + + helper, err := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + Log, + ) + if err != nil { + Log.Error(err, fmt.Sprintf("could not instantiate helper for instance %s", instance.Name)) + return ctrl.Result{}, err + } + + // + // initialize status + // + isNewInstance := instance.Status.Conditions == nil + if isNewInstance { + instance.Status.Conditions = condition.Conditions{} + } + + // Save a copy of the conditions so that we can restore the LastTransitionTime + // when a condition's state doesn't change. + savedConditions := instance.Status.Conditions.DeepCopy() + + // Always patch the instance status when exiting this function so we can persist any changes. + defer func() { + // Don't update the status if the reconciler panics + if r := recover(); r != nil { + Log.Info(fmt.Sprintf("panic during reconcile %v\n", r)) + panic(r) + } + condition.RestoreLastTransitionTimes(&instance.Status.Conditions, savedConditions) + if instance.Status.Conditions.IsUnknown(condition.ReadyCondition) { + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + err := helper.PatchInstance(ctx, instance) + if err != nil { + _err = err + return + } + }() + + // Always initialize conditions used later as Status=Unknown + cl := condition.CreateList( + condition.UnknownCondition(condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage), + condition.UnknownCondition(condition.DBReadyCondition, condition.InitReason, condition.DBReadyInitMessage), + condition.UnknownCondition(condition.DBSyncReadyCondition, condition.InitReason, condition.DBSyncReadyInitMessage), + condition.UnknownCondition(telemetryv1.CloudKittyStorageInitReadyCondition, condition.InitReason, telemetryv1.CloudKittyStorageInitReadyInitMessage), + condition.UnknownCondition(condition.RabbitMqTransportURLReadyCondition, condition.InitReason, condition.RabbitMqTransportURLReadyInitMessage), + condition.UnknownCondition(condition.MemcachedReadyCondition, condition.InitReason, condition.MemcachedReadyInitMessage), + condition.UnknownCondition(condition.InputReadyCondition, condition.InitReason, condition.InputReadyInitMessage), + condition.UnknownCondition(condition.ServiceConfigReadyCondition, condition.InitReason, condition.ServiceConfigReadyInitMessage), + condition.UnknownCondition(telemetryv1.CloudKittyAPIReadyCondition, condition.InitReason, telemetryv1.CloudKittyAPIReadyInitMessage), + condition.UnknownCondition(telemetryv1.CloudKittyProcReadyCondition, condition.InitReason, telemetryv1.CloudKittyProcReadyInitMessage), + condition.UnknownCondition(telemetryv1.CloudKittyClientCertReadyCondition, condition.InitReason, telemetryv1.CloudKittyClientCertReadyInitMessage), + condition.UnknownCondition(telemetryv1.CloudKittyLokiStackReadyCondition, condition.InitReason, telemetryv1.CloudKittyLokiStackReadyInitMessage), + condition.UnknownCondition(condition.NetworkAttachmentsReadyCondition, condition.InitReason, condition.NetworkAttachmentsReadyInitMessage), + // service account, role, rolebinding conditions + condition.UnknownCondition(condition.ServiceAccountReadyCondition, condition.InitReason, condition.ServiceAccountReadyInitMessage), +
condition.UnknownCondition(condition.RoleReadyCondition, condition.InitReason, condition.RoleReadyInitMessage), + condition.UnknownCondition(condition.RoleBindingReadyCondition, condition.InitReason, condition.RoleBindingReadyInitMessage), + ) + instance.Status.Conditions.Init(&cl) + // Always mark the Generation as observed early on + instance.Status.ObservedGeneration = instance.Generation + + // If we're not deleting this and the service object doesn't have our finalizer, add it. + if (instance.DeletionTimestamp.IsZero() && controllerutil.AddFinalizer(instance, helper.GetFinalizer())) || isNewInstance { + // Register overall status immediately to have an early feedback e.g. in the cli + return ctrl.Result{}, nil + } + + if instance.Status.Hash == nil { + instance.Status.Hash = map[string]string{} + } + if instance.Status.APIEndpoints == nil { + instance.Status.APIEndpoints = map[string]map[string]string{} + } + + // Handle service delete + if !instance.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, instance, helper) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, instance, helper) +} + +// fields to index to reconcile when change +const ( + cloudKittyPasswordSecretField = ".spec.secret" + //nolint:gosec // Not hardcoded credentials, just field name + cloudKittyCaBundleSecretNameField = ".spec.tls.caBundleSecretName" + cloudKittyTLSAPIInternalField = ".spec.tls.api.internal.secretName" + cloudKittyTLSAPIPublicField = ".spec.tls.api.public.secretName" + cloudKittyTopologyField = ".spec.topologyRef.Name" +) + +var ( + cloudKittyProcWatchFields = []string{ + cloudKittyPasswordSecretField, + cloudKittyCaBundleSecretNameField, + cloudKittyTopologyField, + } + cloudKittyAPIWatchFields = []string{ + cloudKittyPasswordSecretField, + cloudKittyCaBundleSecretNameField, + cloudKittyTLSAPIInternalField, + cloudKittyTLSAPIPublicField, + cloudKittyTopologyField, + } +) + +// SetupWithManager sets up the controller with the Manager. +func (r *CloudKittyReconciler) SetupWithManager(mgr ctrl.Manager) error { + // transportURLSecretFn - Watch for changes made to the secret associated with the RabbitMQ + // TransportURL created and used by CloudKitty CRs. Watch functions return a list of namespace-scoped + // CRs that then get fed to the reconciler. Hence, in this case, we need to know the name of the + // CloudKitty CR associated with the secret we are examining in the function. We could parse the name + // out of the "%s-cloudkitty-transport" secret label, which would be faster than getting the list of + // the CloudKitty CRs and trying to match on each one. The downside there, however, is that technically + // someone could randomly label a secret "something-cloudkitty-transport" where "something" actually + // matches the name of an existing CloudKitty CR. In that case changes to that secret would trigger + // reconciliation for a CloudKitty CR that does not need it. 
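+ // Illustrative summary (no additional logic): the mapping below walks the Secret's
+ // OwnerReferences and, for each TransportURL owner whose name matches the transport
+ // name derived from a CloudKitty CR, enqueues a reconcile.Request for that CR's
+ // namespace/name.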
+ // + // TODO: We also need a watch func to monitor for changes to the secret referenced by CloudKitty.Spec.Secret + transportURLSecretFn := func(ctx context.Context, o client.Object) []reconcile.Request { + result := []reconcile.Request{} + + Log := r.GetLogger(ctx) + + // get all CloudKitty CRs + cloudkitties := &telemetryv1.CloudKittyList{} + listOpts := []client.ListOption{ + client.InNamespace(o.GetNamespace()), + } + if err := r.List(ctx, cloudkitties, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve CloudKitty CRs %v") + return nil + } + + for _, ownerRef := range o.GetOwnerReferences() { + if ownerRef.Kind == "TransportURL" { + for _, cr := range cloudkitties.Items { + if ownerRef.Name == fmt.Sprintf("%s-cloudkitty-transport", cr.Name) { + // return namespace and Name of CR + name := client.ObjectKey{ + Namespace: o.GetNamespace(), + Name: cr.Name, + } + Log.Info(fmt.Sprintf("TransportURL Secret %s belongs to TransportURL belonging to CloudKitty CR %s", o.GetName(), cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + } + } + if len(result) > 0 { + return result + } + return nil + } + + memcachedFn := func(ctx context.Context, o client.Object) []reconcile.Request { + Log := r.GetLogger(ctx) + + result := []reconcile.Request{} + + // get all CloudKitty CRs + cloudkitties := &telemetryv1.CloudKittyList{} + listOpts := []client.ListOption{ + client.InNamespace(o.GetNamespace()), + } + if err := r.List(ctx, cloudkitties, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve CloudKitty CRs %w") + return nil + } + + for _, cr := range cloudkitties.Items { + if o.GetName() == cr.Spec.MemcachedInstance { + name := client.ObjectKey{ + Namespace: o.GetNamespace(), + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Memcached %s is used by CloudKitty CR %s", o.GetName(), cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + if len(result) > 0 { + return result + } + return nil + } + + prometheusEndpointSecretFn := func(ctx context.Context, o client.Object) []reconcile.Request { + Log := r.GetLogger(ctx) + + result := []reconcile.Request{} + + // Only reconcile if this is the PrometheusEndpoint secret + if o.GetName() != cloudkitty.PrometheusEndpointSecret { + return nil + } + + // get all CloudKitty CRs + cloudkitties := &telemetryv1.CloudKittyList{} + listOpts := []client.ListOption{ + client.InNamespace(o.GetNamespace()), + } + if err := r.List(ctx, cloudkitties, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve CloudKitty CRs %w") + return nil + } + + for _, cr := range cloudkitties.Items { + // Only reconcile CloudKitty CRs that are using MetricStorage (PrometheusHost is empty) + if cr.Spec.PrometheusHost == "" { + name := client.ObjectKey{ + Namespace: o.GetNamespace(), + Name: cr.Name, + } + Log.Info(fmt.Sprintf("PrometheusEndpoint Secret %s is used by CloudKitty CR %s", o.GetName(), cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + if len(result) > 0 { + return result + } + return nil + } + + control, err := ctrl.NewControllerManagedBy(mgr). + For(&telemetryv1.CloudKitty{}). + Owns(&mariadbv1.MariaDBDatabase{}). + Owns(&mariadbv1.MariaDBAccount{}). + Owns(&telemetryv1.CloudKittyAPI{}). + Owns(&telemetryv1.CloudKittyProc{}). + Owns(&rabbitmqv1.TransportURL{}). + Owns(&batchv1.Job{}). + Owns(&corev1.Secret{}). + Owns(&corev1.ConfigMap{}). + Owns(&corev1.ServiceAccount{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). 
+ Owns(&certmgrv1.Certificate{}). + // Watch for TransportURL Secrets which belong to any TransportURLs created by CloudKitty CRs + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(transportURLSecretFn)). + // Watch for PrometheusEndpoint Secret created by MetricStorage + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(prometheusEndpointSecretFn)). + Watches(&memcachedv1.Memcached{}, + handler.EnqueueRequestsFromMapFunc(memcachedFn)). + Watches(&keystonev1.KeystoneAPI{}, + handler.EnqueueRequestsFromMapFunc(r.findObjectForSrc), + builder.WithPredicates(keystonev1.KeystoneAPIStatusChangedPredicate)). + // LokiStack watch added dynamically inside the controller code. + Build(r) + r.Controller = control + return err +} + +func (r *CloudKittyReconciler) findObjectForSrc(ctx context.Context, src client.Object) []reconcile.Request { + requests := []reconcile.Request{} + + l := log.FromContext(ctx).WithName("Controllers").WithName("CloudKitty") + + crList := &telemetryv1.CloudKittyList{} + listOps := &client.ListOptions{ + Namespace: src.GetNamespace(), + } + err := r.List(ctx, crList, listOps) + if err != nil { + l.Error(err, fmt.Sprintf("listing %s for namespace: %s", crList.GroupVersionKind().Kind, src.GetNamespace())) + return requests + } + + for _, item := range crList.Items { + l.Info(fmt.Sprintf("input source %s changed, reconcile: %s - %s", src.GetName(), item.GetName(), item.GetNamespace())) + + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: item.GetName(), + Namespace: item.GetNamespace(), + }, + }, + ) + } + + return requests +} + +func (r *CloudKittyReconciler) reconcileDelete(ctx context.Context, instance *telemetryv1.CloudKitty, helper *helper.Helper) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + + Log.Info(fmt.Sprintf("Reconciling Service '%s' delete", instance.Name)) + + // remove db finalizer first + db, err := mariadbv1.GetDatabaseByNameAndAccount(ctx, helper, cloudkitty.DatabaseName, instance.Spec.DatabaseAccount, instance.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + + if !k8s_errors.IsNotFound(err) { + if err := db.DeleteFinalizer(ctx, helper); err != nil { + return ctrl.Result{}, err + } + } + + // TODO: We might need to control how the sub-services (API and Proc) are + // deleted (when their parent CloudKitty CR is deleted) once we further develop their functionality + + // Service is deleted so remove the finalizer. 
+ controllerutil.RemoveFinalizer(instance, helper.GetFinalizer()) + Log.Info(fmt.Sprintf("Reconciled Service '%s' delete successfully", instance.Name)) + + return ctrl.Result{}, nil +} + +func (r *CloudKittyReconciler) reconcileInit( + ctx context.Context, + instance *telemetryv1.CloudKitty, + helper *helper.Helper, + serviceLabels map[string]string, + serviceAnnotations map[string]string, +) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + + Log.Info(fmt.Sprintf("Reconciling Service '%s' init", instance.Name)) + + // + // run CloudKitty db sync + // + dbSyncHash := instance.Status.Hash[telemetryv1.CKDbSyncHash] + jobDbSyncDef := cloudkitty.DbSyncJob(instance, serviceLabels, serviceAnnotations) + + dbSyncjob := job.NewJob( + jobDbSyncDef, + telemetryv1.CKDbSyncHash, + instance.Spec.PreserveJobs, + cloudkitty.ShortDuration, + dbSyncHash, + ) + ctrlResult, err := dbSyncjob.DoJob( + ctx, + helper, + ) + if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBSyncReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DBSyncReadyRunningMessage)) + return ctrlResult, nil + } + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBSyncReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DBSyncReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if dbSyncjob.HasChanged() { + instance.Status.Hash[telemetryv1.CKDbSyncHash] = dbSyncjob.GetHash() + Log.Info(fmt.Sprintf("Service '%s' - Job %s hash added - %s", instance.Name, jobDbSyncDef.Name, instance.Status.Hash[telemetryv1.CKDbSyncHash])) + } + instance.Status.Conditions.MarkTrue(condition.DBSyncReadyCondition, condition.DBSyncReadyMessage) + + // run CloudKitty db sync - end + + // + // run CloudKitty Storage Init + // + ckStorageInitHash := instance.Status.Hash[telemetryv1.CKStorageInitHash] + jobStorageInitDef := cloudkitty.StorageInitJob(instance, serviceLabels, serviceAnnotations) + + storageInitjob := job.NewJob( + jobStorageInitDef, + telemetryv1.CKStorageInitHash, + instance.Spec.PreserveJobs, + cloudkitty.ShortDuration, + ckStorageInitHash, + ) + ctrlResult, err = storageInitjob.DoJob( + ctx, + helper, + ) + if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyStorageInitReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + telemetryv1.CloudKittyStorageInitReadyRunningMessage)) + return ctrlResult, nil + } + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyStorageInitReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + telemetryv1.CloudKittyStorageInitReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if storageInitjob.HasChanged() { + instance.Status.Hash[telemetryv1.CKStorageInitHash] = storageInitjob.GetHash() + Log.Info(fmt.Sprintf("Service '%s' - Job %s hash added - %s", instance.Name, jobStorageInitDef.Name, instance.Status.Hash[telemetryv1.CKStorageInitHash])) + } + instance.Status.Conditions.MarkTrue(telemetryv1.CloudKittyStorageInitReadyCondition, telemetryv1.CloudKittyStorageInitReadyMessage) + + // run CloudKitty Storage Init - end + + Log.Info(fmt.Sprintf("Reconciled Service '%s' init successfully", instance.Name)) + return ctrl.Result{}, nil +} + +// Original source: +// https://github.com/openstack-k8s-operators/openstack-operator/blob/cf133b39e91c05f53c57725d7c6f5a627d98dccd/pkg/openstack/ca.go#L687 
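+// getCAFromSecret - reads the "ca.crt" entry from the given Secret. While the Secret is
+// not yet available it marks the CloudKittyLokiStackReady condition as waiting and asks
+// for a requeue; on error it records the failure on that same condition.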
+func getCAFromSecret( + ctx context.Context, + instance *telemetryv1.CloudKitty, + helper *helper.Helper, + secretName string, +) (string, ctrl.Result, error) { + caSecret, ctrlResult, err := secret.GetDataFromSecret(ctx, helper, secretName, time.Duration(5), "ca.crt") + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyLokiStackReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + telemetryv1.CloudKittyLokiStackReadyErrorMessage, + err.Error())) + + return "", ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyLokiStackReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + telemetryv1.CloudKittyLokiStackReadyRunningMessage)) + + return "", ctrlResult, nil + } + + return caSecret, ctrl.Result{}, nil +} + +func (r *CloudKittyReconciler) reconcileNormal(ctx context.Context, instance *telemetryv1.CloudKitty, helper *helper.Helper) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + + Log.Info(fmt.Sprintf("Reconciling Service '%s'", instance.Name)) + + // Create cloudkitty client cert / key + certIssuer, err := certmanager.GetIssuerByLabels( + ctx, helper, instance.Namespace, + map[string]string{certmanager.RootCAIssuerInternalLabel: ""}, + ) + if err != nil { + Log.Error(err, "Failed to determine certificate issuer") + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyClientCertReadyCondition, + condition.ErrorReason, + condition.SeverityError, + telemetryv1.CloudKittyClientCertReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + certDefinition := cloudkitty.Certificate( + instance, serviceLabels, certIssuer, + ) + cert := certmanager.NewCertificate(certDefinition, 5*time.Second) + ctrlResult, _, err := cert.CreateOrPatch(ctx, helper, nil) + + if err != nil { + Log.Error(err, "Failed to create or patch cloudkitty client certificate") + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyClientCertReadyCondition, + condition.ErrorReason, + condition.SeverityError, + telemetryv1.CloudKittyClientCertReadyErrorMessage, + err.Error())) + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + Log.Info("CloudKitty client certificate is being created") + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyClientCertReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + telemetryv1.CloudKittyClientCertReadyRunningMessage)) + return ctrlResult, nil + } + + caData, ctrlResult, err := getCAFromSecret( + ctx, instance, helper, certDefinition.Spec.SecretName, + ) + if err != nil { + Log.Error(err, "Failed to get cloudkitty client certificate CA data") + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyClientCertReadyCondition, + condition.ErrorReason, + condition.SeverityError, + telemetryv1.CloudKittyClientCertReadyErrorMessage, + err.Error())) + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + cms := []util.Template{ + { + Name: fmt.Sprintf("%s-%s", instance.Name, cloudkitty.CaConfigmapName), + Namespace: instance.Namespace, + Type: util.TemplateTypeNone, + InstanceType: "cloudkitty", + CustomData: map[string]string{ + cloudkitty.CaConfigmapKey: caData, + }, + }, + } + + err = configmap.EnsureConfigMaps( + ctx, helper, instance, cms, nil, + ) + if err != nil { + Log.Error(err, "Failed to create CA configmap for 
cloudkitty client cert verification") + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyClientCertReadyCondition, + condition.ErrorReason, + condition.SeverityError, + telemetryv1.CloudKittyClientCertReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + instance.Status.Conditions.MarkTrue(telemetryv1.CloudKittyClientCertReadyCondition, telemetryv1.CloudKittyClientCertReadyMessage) + + // Deploy Loki + var eventHandler = handler.EnqueueRequestForOwner( + r.Scheme, + r.RESTMapper, + &telemetryv1.CloudKitty{}, + handler.OnlyControllerOwner(), + ) + + err = utils.EnsureWatches( + ctx, (*utils.ConditionalWatchingReconciler)(r), + "lokistacks.loki.grafana.com", + &lokistackv1.LokiStack{}, eventHandler, helper, + ) + if err != nil { + instance.Status.Conditions.MarkFalse(telemetryv1.CloudKittyLokiStackReadyCondition, + condition.Reason("Can't own LokiStack resource. The loki-operator probably isn't installed"), + condition.SeverityError, + telemetryv1.CloudKittyLokiStackUnableToOwnMessage, err) + Log.Info("Can't own LokiStack resource. The loki-operator probably isn't installed") + return ctrl.Result{RequeueAfter: telemetryv1.PauseBetweenWatchAttempts}, nil + } + + lokiStack := &lokistackv1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-lokistack", instance.Name), + Namespace: instance.Namespace, + }, + } + op, err := controllerutil.CreateOrPatch(ctx, r.Client, lokiStack, func() error { + desiredLokiStack, err := cloudkitty.LokiStack(instance, serviceLabels) + if err != nil { + return err + } + desiredLokiStack.Spec.DeepCopyInto(&lokiStack.Spec) + lokiStack.Labels = serviceLabels + err = controllerutil.SetControllerReference(instance, lokiStack, r.Scheme) + return err + }) + if err != nil { + Log.Error(err, "Failed to create or patch LokiStack") + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyLokiStackReadyCondition, + condition.ErrorReason, + condition.SeverityError, + telemetryv1.CloudKittyLokiStackReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if op != controllerutil.OperationResultNone { + Log.Info(fmt.Sprintf("LokiStack %s successfully changed - operation: %s", lokiStack.Name, string(op))) + } + + // Mirror LokiStacks's condition here. LokiStack uses conditions + // a little differently than o-k-o. Whats more, it can have + // multiple 'active' conditions, while we have only one 'master' + // condition LokiStack here. So we mirror hopefully the one most + // relevant active condition in this order of + // priority: Ready > Failed > Degraded > Pending > Warning. 
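+ // For example, if LokiStack reports both Degraded=True and Pending=True, the Degraded
+ // reason and message are mirrored because Degraded sits earlier in the priority list;
+ // only a winning "Ready" condition marks CloudKittyLokiStackReady true.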
+ + order := []string{"Ready", "Failed", "Degraded", "Pending", "Warning"} + index := len(order) + reason := condition.InitReason + message := telemetryv1.CloudKittyLokiStackReadyInitMessage + for _, c := range lokiStack.Status.Conditions { + conditionIndex := slices.Index(order, c.Type) + if c.Status == "True" && conditionIndex < index { + index = conditionIndex + reason = c.Reason + message = c.Message + } + } + if index < len(order) && order[index] == "Ready" { + instance.Status.Conditions.MarkTrue(telemetryv1.CloudKittyLokiStackReadyCondition, telemetryv1.CloudKittyLokiStackReadyMessage) + } else { + Log.Info("LokiStack not ready") + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyLokiStackReadyCondition, + condition.Reason(reason), + condition.SeverityWarning, + "LokiStack issue: %s", message)) + } + + // Service account, role, binding + rbacRules := []rbacv1.PolicyRule{ + { + APIGroups: []string{"security.openshift.io"}, + ResourceNames: []string{"anyuid"}, + Resources: []string{"securitycontextconstraints"}, + Verbs: []string{"use"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"create", "get", "list", "watch", "update", "patch", "delete"}, + }, + } + rbacResult, err := common_rbac.ReconcileRbac(ctx, helper, instance, rbacRules) + if err != nil { + return rbacResult, err + } else if (rbacResult != ctrl.Result{}) { + return rbacResult, nil + } + + serviceLabels := map[string]string{ + common.AppSelector: cloudkitty.ServiceName, + } + + configVars := make(map[string]env.Setter) + + // + // create RabbitMQ transportURL CR and get the actual URL from the associated secret that is created + // + + transportURL, op, err := r.transportURLCreateOrUpdate(ctx, instance, serviceLabels) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.RabbitMqTransportURLReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.RabbitMqTransportURLReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + if op != controllerutil.OperationResultNone { + Log.Info(fmt.Sprintf("TransportURL %s successfully reconciled - operation: %s", transportURL.Name, string(op))) + } + + instance.Status.TransportURLSecret = transportURL.Status.SecretName + + if instance.Status.TransportURLSecret == "" { + Log.Info(fmt.Sprintf("Waiting for TransportURL %s secret to be created", transportURL.Name)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.RabbitMqTransportURLReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.RabbitMqTransportURLReadyRunningMessage)) + return cloudkitty.ResultRequeue, nil + } + + instance.Status.Conditions.MarkTrue(condition.RabbitMqTransportURLReadyCondition, condition.RabbitMqTransportURLReadyMessage) + + // end transportURL + + // + // Check for required memcached used for caching + // + memcached, err := memcachedv1.GetMemcachedByName(ctx, helper, instance.Spec.MemcachedInstance, instance.Namespace) + if err != nil { + Log.Info(fmt.Sprintf("%s... 
requeueing", condition.MemcachedReadyWaitingMessage)) + if k8s_errors.IsNotFound(err) { + Log.Info(fmt.Sprintf("memcached %s not found", instance.Spec.MemcachedInstance)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.MemcachedReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.MemcachedReadyWaitingMessage)) + return cloudkitty.ResultRequeue, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.MemcachedReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.MemcachedReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + if !memcached.IsReady() { + Log.Info(fmt.Sprintf("%s... requeueing", condition.MemcachedReadyWaitingMessage)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.MemcachedReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.MemcachedReadyWaitingMessage)) + return cloudkitty.ResultRequeue, nil + } + // Mark the Memcached Service as Ready if we get to this point with no errors + instance.Status.Conditions.MarkTrue( + condition.MemcachedReadyCondition, condition.MemcachedReadyMessage) + // run check memcached - end + + // + // Check for PrometheusEndpoint secret if using MetricStorage + // + if instance.Spec.PrometheusHost == "" { + prometheusEndpointSecret := &corev1.Secret{} + err = r.Get(ctx, client.ObjectKey{ + Name: cloudkitty.PrometheusEndpointSecret, + Namespace: instance.Namespace, + }, prometheusEndpointSecret) + if err != nil { + if k8s_errors.IsNotFound(err) { + Log.Info("PrometheusEndpoint Secret not found. CloudKitty will not be deployed until MetricStorage creates it.") + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.Reason("PrometheusEndpoint secret not found. The MetricStorage probably hasn't been created yet or isn't ready"), + condition.SeverityError, + "PrometheusEndpoint secret %s not found. 
Waiting for MetricStorage to create it", + cloudkitty.PrometheusEndpointSecret)) + return ctrl.Result{RequeueAfter: telemetryv1.PauseBetweenWatchAttempts}, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + } + // run check PrometheusEndpoint secret - end + + // + // check for required OpenStack secret holding passwords for service/admin user and add hash to the vars map + // + + result, err := cloudkitty.VerifyServiceSecret( + ctx, + types.NamespacedName{Namespace: instance.Namespace, Name: instance.Spec.Secret}, + []string{ + instance.Spec.PasswordSelectors.CloudKittyService, + }, + helper.GetClient(), + &instance.Status.Conditions, + cloudkitty.NormalDuration, + &configVars, + ) + if err != nil { + return result, err + } else if (result != ctrl.Result{}) { + return result, nil + } + instance.Status.Conditions.MarkTrue(condition.InputReadyCondition, condition.InputReadyMessage) + // run check OpenStack secret - end + + db, result, err := r.ensureDB(ctx, helper, instance) + if err != nil { + return ctrl.Result{}, err + } else if (result != ctrl.Result{}) { + return result, nil + } + + // + // Create Secrets required as input for the Service and calculate an overall hash of hashes + // + err = r.generateServiceConfigs(ctx, helper, instance, &configVars, serviceLabels, memcached, db) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + // + // create hash over all the different input resources to identify if any those changed + // and a restart/recreate is required. + // + _, hashChanged, err := r.createHashOfInputHashes(ctx, instance, configVars) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } else if hashChanged { + Log.Info(fmt.Sprintf("%s... requeueing", condition.ServiceConfigReadyInitMessage)) + instance.Status.Conditions.MarkFalse( + condition.ServiceConfigReadyCondition, + condition.InitReason, + condition.SeverityInfo, + condition.ServiceConfigReadyInitMessage) + // Hash changed and instance status should be updated (which will be done by main defer func), + // so we need to return and reconcile again + return ctrl.Result{}, nil + } + + instance.Status.Conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage) + + // + // TODO check when/if Init, Update, or Upgrade should/could be skipped + // + + // Check networks that the DBSync job will use in reconcileInit. The ones from the API service are always enough, + // it doesn't need the storage specific ones that volume or backup may have. 
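+ // For reference, spec.cloudKittyAPI.networkAttachments is a plain list of
+ // NetworkAttachmentDefinition names that are looked up in the CR's namespace;
+ // a value such as "internalapi" is shown here purely as an illustration.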
+ nadList := []networkv1.NetworkAttachmentDefinition{} + for _, netAtt := range instance.Spec.CloudKittyAPI.NetworkAttachments { + nad, err := nad.GetNADWithName(ctx, helper, netAtt, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + Log.Info(fmt.Sprintf("network-attachment-definition %s not found", netAtt)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.NetworkAttachmentsReadyWaitingMessage, + netAtt)) + //nolint:err113 // Using condition message format from lib-common + return cloudkitty.ResultRequeue, fmt.Errorf(condition.NetworkAttachmentsReadyWaitingMessage, netAtt) + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + if nad != nil { + nadList = append(nadList, *nad) + } + } + + instance.Status.Conditions.MarkTrue(condition.NetworkAttachmentsReadyCondition, condition.NetworkAttachmentsReadyMessage) + + serviceAnnotations, err := nad.EnsureNetworksAnnotation(nadList) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create network annotation from %s: %w", + instance.Spec.CloudKittyAPI.NetworkAttachments, err) + } + + // Handle service init + ctrlResult, err = r.reconcileInit(ctx, instance, helper, serviceLabels, serviceAnnotations) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + // + // normal reconcile tasks + // + + // deploy cloudkitty-api + cloudKittyAPI, op, err := r.apiDeploymentCreateOrUpdate(ctx, instance) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyAPIReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + telemetryv1.CloudKittyAPIReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if op != controllerutil.OperationResultNone { + Log.Info(fmt.Sprintf("API CR for %s successfully %s", instance.Name, string(op))) + } + + // Mirror values when the data in the StatefulSet is for the current generation + if cloudKittyAPI.Generation == cloudKittyAPI.Status.ObservedGeneration { + // Mirror CloudKittyAPI status' APIEndpoints and ReadyCount to this parent CR + instance.Status.APIEndpoints = cloudKittyAPI.Status.APIEndpoints + instance.Status.ServiceIDs = cloudKittyAPI.Status.ServiceIDs + instance.Status.CloudKittyAPIReadyCount = cloudKittyAPI.Status.ReadyCount + + // Mirror CloudKittyAPI's condition status + c := cloudKittyAPI.Status.Conditions.Mirror(telemetryv1.CloudKittyAPIReadyCondition) + if c != nil { + instance.Status.Conditions.Set(c) + } + } + + // deploy CloudKitty Processor + cloudKittyProc, op, err := r.procDeploymentCreateOrUpdate(ctx, instance) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyProcReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + telemetryv1.CloudKittyProcReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if op != controllerutil.OperationResultNone { + Log.Info(fmt.Sprintf("Processor CR for %s successfully %s", instance.Name, string(op))) + } + + // Mirror values when the data in the StatefulSet is for the current generation + if cloudKittyProc.Generation == cloudKittyProc.Status.ObservedGeneration { + // Mirror CloudKitty Processor status'
ReadyCount to this parent CR + instance.Status.CloudKittyProcReadyCount = cloudKittyProc.Status.ReadyCount + + // Mirror CloudKittyProc's condition status + c := cloudKittyProc.Status.Conditions.Mirror(telemetryv1.CloudKittyProcReadyCondition) + if c != nil { + instance.Status.Conditions.Set(c) + } + } + + err = mariadbv1.DeleteUnusedMariaDBAccountFinalizers(ctx, helper, cloudkitty.DatabaseName, instance.Spec.DatabaseAccount, instance.Namespace) + if err != nil { + return ctrl.Result{}, err + } + + Log.Info(fmt.Sprintf("Reconciled Service '%s' successfully", instance.Name)) + // update the overall status condition if service is ready + if instance.IsReady() { + instance.Status.Conditions.MarkTrue(condition.ReadyCondition, condition.ReadyMessage) + } + return ctrl.Result{}, nil +} + +// generateServiceConfigs - create Secret which hold scripts and service configuration +func (r *CloudKittyReconciler) generateServiceConfigs( + ctx context.Context, + h *helper.Helper, + instance *telemetryv1.CloudKitty, + envVars *map[string]env.Setter, + serviceLabels map[string]string, + memcached *memcachedv1.Memcached, + db *mariadbv1.Database, +) error { + // + // create Secret required for cloudkitty input + // - %-scripts holds scripts to e.g. bootstrap the service + // - %-config holds minimal cloudkitty config required to get the service up + // + + labels := labels.GetLabels(instance, labels.GetGroupLabel(cloudkitty.ServiceName), serviceLabels) + + var tlsCfg *tls.Service + if instance.Spec.CloudKittyAPI.TLS.CaBundleSecretName != "" { + tlsCfg = &tls.Service{} + } + + // customData hold any customization for all cloudkitty services. + customData := map[string]string{ + cloudkitty.CustomConfigFileName: instance.Spec.CustomServiceConfig, + cloudkitty.MyCnfFileName: db.GetDatabaseClientConfig(tlsCfg), //(mschuppert) for now just get the default my.cnf + } + + keystoneAPI, err := keystonev1.GetKeystoneAPI(ctx, h, instance.Namespace, map[string]string{}) + if err != nil { + return err + } + keystoneInternalURL, err := keystoneAPI.GetEndpoint(endpoint.EndpointInternal) + if err != nil { + return err + } + keystonePublicURL, err := keystoneAPI.GetEndpoint(endpoint.EndpointPublic) + if err != nil { + return err + } + + ospSecret, _, err := secret.GetSecret(ctx, h, instance.Spec.Secret, instance.Namespace) + if err != nil { + return err + } + + transportURLSecret, _, err := secret.GetSecret(ctx, h, instance.Status.TransportURLSecret, instance.Namespace) + if err != nil { + return err + } + + if instance.Spec.PrometheusHost == "" { + // We're using MetricStorage for Prometheus. 
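+ // The MetricStorage-owned secret supplies the Prometheus host and port below, while
+ // the TLS flag is taken from the MetricStorage CR itself.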
+ // Note: The secret existence is already checked in reconcileNormal(), so we can safely get it here + prometheusEndpointSecret := &corev1.Secret{} + err = r.Get(ctx, client.ObjectKey{ + Name: cloudkitty.PrometheusEndpointSecret, + Namespace: instance.Namespace, + }, prometheusEndpointSecret) + if err != nil { + return err + } + if prometheusEndpointSecret.Data != nil { + instance.Status.PrometheusHost = string(prometheusEndpointSecret.Data[metricstorage.PrometheusHost]) + port, err := strconv.Atoi(string(prometheusEndpointSecret.Data[metricstorage.PrometheusPort])) + if err != nil { + return err + } + //nolint:gosec // G109: Port number is read from a secret and validated to be within valid range + instance.Status.PrometheusPort = int32(port) + + metricStorage := &telemetryv1.MetricStorage{} + err = r.Get(ctx, client.ObjectKey{ + Namespace: instance.Namespace, + Name: telemetryv1.DefaultServiceName, + }, metricStorage) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return err + } + instance.Status.PrometheusTLS = metricStorage.Spec.PrometheusTLS.Enabled() + } + } else { + // We're using user-deployed Prometheus. + instance.Status.PrometheusHost = instance.Spec.PrometheusHost + instance.Status.PrometheusPort = instance.Spec.PrometheusPort + instance.Status.PrometheusTLS = instance.Spec.PrometheusTLSCaCertSecret != nil + } + + databaseAccount := db.GetAccount() + dbSecret := db.GetSecret() + + lokiHost := fmt.Sprintf("%s-lokistack-gateway-http.%s.svc", instance.Name, instance.Namespace) + + templateParameters := make(map[string]interface{}) + templateParameters["ServiceUser"] = instance.Spec.ServiceUser + templateParameters["ServicePassword"] = string(ospSecret.Data[instance.Spec.PasswordSelectors.CloudKittyService]) + templateParameters["KeystoneInternalURL"] = keystoneInternalURL + templateParameters["KeystonePublicURL"] = keystonePublicURL + templateParameters["TransportURL"] = string(transportURLSecret.Data["transport_url"]) + templateParameters["PrometheusHost"] = instance.Status.PrometheusHost + templateParameters["PrometheusPort"] = instance.Status.PrometheusPort + templateParameters["Period"] = instance.Spec.Period + templateParameters["LokiHost"] = lokiHost + templateParameters["LokiPort"] = 8080 + templateParameters["DatabaseConnection"] = fmt.Sprintf("mysql+pymysql://%s:%s@%s/%s?read_default_file=/etc/my.cnf", + databaseAccount.Spec.UserName, + string(dbSecret.Data[mariadbv1.DatabasePasswordSelector]), + instance.Status.DatabaseHostname, + cloudkitty.DatabaseName) + templateParameters["MemcachedServersWithInet"] = memcached.GetMemcachedServerListWithInetString() + templateParameters["TimeOut"] = instance.Spec.APITimeout + + templateParameters["TLS"] = false + if instance.Spec.CloudKittyProc.TLS.Enabled() { + templateParameters["TLS"] = true + templateParameters["CAFile"] = tls.DownstreamTLSCABundlePath + } + + // Set Prometheus TLS configuration + templateParameters["PrometheusTLS"] = instance.Status.PrometheusTLS + if instance.Status.PrometheusTLS { + // For operator-managed Prometheus or user-deployed Prometheus with custom CA, + // use the downstream TLS CA bundle path + templateParameters["PrometheusCAFile"] = tls.DownstreamTLSCABundlePath + } + + // create httpd vhost template parameters + httpdVhostConfig := map[string]interface{}{} + for _, endpt := range 
[]service.Endpoint{service.EndpointInternal, service.EndpointPublic} { + endptConfig := map[string]interface{}{} + endptConfig["ServerName"] = fmt.Sprintf("%s-%s.%s.svc", cloudkitty.ServiceName, endpt.String(), instance.Namespace) + endptConfig["TLS"] = false // default TLS to false, and set it bellow to true if enabled + if instance.Spec.CloudKittyAPI.TLS.API.Enabled(endpt) { + endptConfig["TLS"] = true + endptConfig["SSLCertificateFile"] = fmt.Sprintf("/etc/pki/tls/certs/%s.crt", endpt.String()) + endptConfig["SSLCertificateKeyFile"] = fmt.Sprintf("/etc/pki/tls/private/%s.key", endpt.String()) + } + httpdVhostConfig[endpt.String()] = endptConfig + } + templateParameters["VHosts"] = httpdVhostConfig + + configTemplates := []util.Template{ + { + Name: fmt.Sprintf("%s-scripts", instance.Name), + Namespace: instance.Namespace, + Type: util.TemplateTypeScripts, + InstanceType: instance.Kind, + Labels: labels, + }, + { + Name: fmt.Sprintf("%s-config-data", instance.Name), + Namespace: instance.Namespace, + Type: util.TemplateTypeConfig, + InstanceType: instance.Kind, + CustomData: customData, + ConfigOptions: templateParameters, + Labels: labels, + }, + } + + return secret.EnsureSecrets(ctx, h, instance, configTemplates, envVars) +} + +// createHashOfInputHashes - creates a hash of hashes which gets added to the resources which requires a restart +// if any of the input resources change, like configs, passwords, ... +// +// returns the hash, whether the hash changed (as a bool) and any error +func (r *CloudKittyReconciler) createHashOfInputHashes( + ctx context.Context, + instance *telemetryv1.CloudKitty, + envVars map[string]env.Setter, +) (string, bool, error) { + Log := r.GetLogger(ctx) + + var hashMap map[string]string + changed := false + mergedMapVars := env.MergeEnvs([]corev1.EnvVar{}, envVars) + hash, err := util.ObjectHash(mergedMapVars) + if err != nil { + return hash, changed, err + } + if hashMap, changed = util.SetHash(instance.Status.Hash, common.InputHashName, hash); changed { + instance.Status.Hash = hashMap + Log.Info(fmt.Sprintf("Input maps hash %s - %s", common.InputHashName, hash)) + } + return hash, changed, nil +} + +func (r *CloudKittyReconciler) transportURLCreateOrUpdate( + ctx context.Context, + instance *telemetryv1.CloudKitty, + serviceLabels map[string]string, +) (*rabbitmqv1.TransportURL, controllerutil.OperationResult, error) { + transportURL := &rabbitmqv1.TransportURL{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-transport", instance.Name), + Namespace: instance.Namespace, + Labels: serviceLabels, + }, + } + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, transportURL, func() error { + transportURL.Spec.RabbitmqClusterName = instance.Spec.RabbitMqClusterName + + err := controllerutil.SetControllerReference(instance, transportURL, r.Scheme) + return err + }) + + return transportURL, op, err +} + +func (r *CloudKittyReconciler) apiDeploymentCreateOrUpdate(ctx context.Context, instance *telemetryv1.CloudKitty) (*telemetryv1.CloudKittyAPI, controllerutil.OperationResult, error) { + cloudkittyAPISpec := telemetryv1.CloudKittyAPISpec{ + CloudKittyTemplate: instance.Spec.CloudKittyTemplate, + CloudKittyAPITemplate: instance.Spec.CloudKittyAPI, + DatabaseHostname: instance.Status.DatabaseHostname, + TransportURLSecret: instance.Status.TransportURLSecret, + ServiceAccount: instance.RbacResourceName(), + } + + if cloudkittyAPISpec.NodeSelector == nil { + cloudkittyAPISpec.NodeSelector = instance.Spec.NodeSelector + } + + // If topology is not 
present in the underlying CloudKittyAPI Spec, + // inherit from the top-level CR + if cloudkittyAPISpec.TopologyRef == nil { + cloudkittyAPISpec.TopologyRef = instance.Spec.TopologyRef + } + + deployment := &telemetryv1.CloudKittyAPI{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-api", instance.Name), + Namespace: instance.Namespace, + }, + } + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, deployment, func() error { + deployment.Spec = cloudkittyAPISpec + + err := controllerutil.SetControllerReference(instance, deployment, r.Scheme) + if err != nil { + return err + } + + return nil + }) + + return deployment, op, err +} + +func (r *CloudKittyReconciler) procDeploymentCreateOrUpdate(ctx context.Context, instance *telemetryv1.CloudKitty) (*telemetryv1.CloudKittyProc, controllerutil.OperationResult, error) { + cloudKittyProcSpec := telemetryv1.CloudKittyProcSpec{ + CloudKittyTemplate: instance.Spec.CloudKittyTemplate, + CloudKittyProcTemplate: instance.Spec.CloudKittyProc, + DatabaseHostname: instance.Status.DatabaseHostname, + TransportURLSecret: instance.Status.TransportURLSecret, + ServiceAccount: instance.RbacResourceName(), + //TLS: instance.Spec.CloudKittyProc.TLS.Ca, + } + + if cloudKittyProcSpec.NodeSelector == nil { + cloudKittyProcSpec.NodeSelector = instance.Spec.NodeSelector + } + + // If topology is not present in the underlying Scheduler Spec + // inherit from the top-level CR + if cloudKittyProcSpec.TopologyRef == nil { + cloudKittyProcSpec.TopologyRef = instance.Spec.TopologyRef + } + + deployment := &telemetryv1.CloudKittyProc{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-proc", instance.Name), + Namespace: instance.Namespace, + }, + } + + op, err := controllerutil.CreateOrUpdate(ctx, r.Client, deployment, func() error { + deployment.Spec = cloudKittyProcSpec + + err := controllerutil.SetControllerReference(instance, deployment, r.Scheme) + if err != nil { + return err + } + + return nil + }) + + return deployment, op, err +} + +func (r *CloudKittyReconciler) ensureDB( + ctx context.Context, + h *helper.Helper, + instance *telemetryv1.CloudKitty, +) (*mariadbv1.Database, ctrl.Result, error) { + Log := r.GetLogger(ctx) + + // ensure MariaDBAccount exists. This account record may be created by + // openstack-operator or the cloud operator up front without a specific + // MariaDBDatabase configured yet. Otherwise, a MariaDBAccount CR is + // created here with a generated username as well as a secret with + // generated password. The MariaDBAccount is created without being + // yet associated with any MariaDBDatabase. 
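+ // Below, NewDatabaseForAccount/CreateOrPatchAll then bind that account to the
+ // "cloudkitty" MariaDBDatabase, and Status.DatabaseHostname is recorded once the
+ // database is ready.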
+ _, _, err := mariadbv1.EnsureMariaDBAccount( + ctx, h, instance.Spec.DatabaseAccount, + instance.Namespace, false, "cloudkitty", + ) + + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + mariadbv1.MariaDBAccountReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + mariadbv1.MariaDBAccountNotReadyMessage, + err.Error())) + + return nil, ctrl.Result{}, err + } + instance.Status.Conditions.MarkTrue( + mariadbv1.MariaDBAccountReadyCondition, + mariadbv1.MariaDBAccountReadyMessage, + ) + + db := mariadbv1.NewDatabaseForAccount( + instance.Spec.DatabaseInstance, // mariadb/galera service to target + cloudkitty.DatabaseName, // name used in CREATE DATABASE in mariadb + cloudkitty.DatabaseName, // CR name for MariaDBDatabase + instance.Spec.DatabaseAccount, // CR name for MariaDBAccount + instance.Namespace, // namespace + ) + + // create or patch the DB + ctrlResult, err := db.CreateOrPatchAll(ctx, h) + + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DBReadyErrorMessage, + err.Error())) + return db, ctrl.Result{}, err + } + if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DBReadyRunningMessage)) + return db, ctrlResult, nil + } + // wait for the DB to be setup + // (ksambor) should we use WaitForDBCreatedWithTimeout instead? + ctrlResult, err = db.WaitForDBCreated(ctx, h) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DBReadyErrorMessage, + err.Error())) + return db, ctrlResult, err + } + if (ctrlResult != ctrl.Result{}) { + Log.Info(fmt.Sprintf("%s... requeueing", condition.DBReadyRunningMessage)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DBReadyRunningMessage)) + return db, ctrlResult, nil + } + + // update Status.DatabaseHostname, used to config the service + instance.Status.DatabaseHostname = db.GetDatabaseHostname() + instance.Status.Conditions.MarkTrue(condition.DBReadyCondition, condition.DBReadyMessage) + return db, ctrlResult, nil +} diff --git a/controllers/cloudkittyapi_controller.go b/controllers/cloudkittyapi_controller.go new file mode 100644 index 00000000..d4921e1a --- /dev/null +++ b/controllers/cloudkittyapi_controller.go @@ -0,0 +1,1216 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1" + "github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkitty" + "github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkittyapi" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/go-logr/logr" + networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/endpoint" + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/labels" + nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + "github.com/openstack-k8s-operators/lib-common/modules/common/statefulset" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" +) + +// GetClient - +func (r *CloudKittyAPIReconciler) GetClient() client.Client { + return r.Client +} + +// GetKClient - +func (r *CloudKittyAPIReconciler) GetKClient() kubernetes.Interface { + return r.Kclient +} + +// GetScheme - +func (r *CloudKittyAPIReconciler) GetScheme() *runtime.Scheme { + return r.Scheme +} + +// CloudKittyAPIReconciler reconciles a CloudKittyAPI object +type CloudKittyAPIReconciler struct { + client.Client + Kclient kubernetes.Interface + Scheme *runtime.Scheme +} + +// GetLogger returns a logger object with a logging prefix of "controller.name" and additional controller context fields +func (r *CloudKittyAPIReconciler) GetLogger(ctx context.Context) logr.Logger { + return log.FromContext(ctx).WithName("Controllers").WithName("CloudKittyAPI") +} + +var keystoneServices = []map[string]string{ + { + "type": cloudkitty.ServiceType, + "name": cloudkitty.ServiceName, + "desc": "CloudKitty V2 Service", + }, +} + +//+kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyapis,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyapis/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkittyapis/finalizers,verbs=update;patch +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete +// 
+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;create;update;patch;delete;watch +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list; +// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;create;update;patch;delete;watch +// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;create;update;patch;delete;watch +// +kubebuilder:rbac:groups=keystone.openstack.org,resources=keystoneservices,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=keystone.openstack.org,resources=keystoneendpoints,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch +// +kubebuilder:rbac:groups=topology.openstack.org,resources=topologies,verbs=get;list;watch;update + +// Reconcile - +func (r *CloudKittyAPIReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + Log := r.GetLogger(ctx) + + // Fetch the CloudKittyAPI instance + instance := &telemetryv1.CloudKittyAPI{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. Return and don't requeue. + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + + helper, err := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + Log, + ) + if err != nil { + return ctrl.Result{}, err + } + + // + // initialize status + // + isNewInstance := instance.Status.Conditions == nil + if isNewInstance { + instance.Status.Conditions = condition.Conditions{} + } + + // Save a copy of the conditions so that we can restore the LastTransitionTime + // when a condition's state doesn't change. + savedConditions := instance.Status.Conditions.DeepCopy() + + // Always patch the instance status when exiting this function so we can persist any changes.
+ defer func() { + // Don't update the status, if reconciler Panics + if r := recover(); r != nil { + Log.Info(fmt.Sprintf("panic during reconcile %v\n", r)) + panic(r) + } + condition.RestoreLastTransitionTimes(&instance.Status.Conditions, savedConditions) + if instance.Status.Conditions.IsUnknown(condition.ReadyCondition) { + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + err := helper.PatchInstance(ctx, instance) + if err != nil { + _err = err + return + } + }() + + // Always initialize conditions used later as Status=Unknown + cl := condition.CreateList( + condition.UnknownCondition(condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage), + condition.UnknownCondition(condition.CreateServiceReadyCondition, condition.InitReason, condition.CreateServiceReadyInitMessage), + condition.UnknownCondition(condition.InputReadyCondition, condition.InitReason, condition.InputReadyInitMessage), + condition.UnknownCondition(condition.ServiceConfigReadyCondition, condition.InitReason, condition.ServiceConfigReadyInitMessage), + condition.UnknownCondition(condition.DeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage), + // right now we have no dedicated KeystoneServiceReadyInitMessage and KeystoneEndpointReadyInitMessage + condition.UnknownCondition(condition.KeystoneServiceReadyCondition, condition.InitReason, ""), + condition.UnknownCondition(condition.KeystoneEndpointReadyCondition, condition.InitReason, ""), + condition.UnknownCondition(condition.NetworkAttachmentsReadyCondition, condition.InitReason, condition.NetworkAttachmentsReadyInitMessage), + condition.UnknownCondition(condition.TLSInputReadyCondition, condition.InitReason, condition.InputReadyInitMessage), + ) + instance.Status.Conditions.Init(&cl) + // Always mark the Generation as observed early on + instance.Status.ObservedGeneration = instance.Generation + + // If we're not deleting this and the service object doesn't have our finalizer, add it. + if (instance.DeletionTimestamp.IsZero() && controllerutil.AddFinalizer(instance, helper.GetFinalizer())) || isNewInstance { + // Register overall status immediately to have an early feedback e.g. in the cli + return ctrl.Result{}, nil + } + + if instance.Status.Hash == nil { + instance.Status.Hash = map[string]string{} + } + if instance.Status.APIEndpoints == nil { + instance.Status.APIEndpoints = map[string]map[string]string{} + } + if instance.Status.ServiceIDs == nil { + instance.Status.ServiceIDs = map[string]string{} + } + if instance.Status.NetworkAttachments == nil { + instance.Status.NetworkAttachments = map[string][]string{} + } + + // Handle service delete + if !instance.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, instance, helper) + } + + // Init Topology condition if there's a reference + if instance.Spec.TopologyRef != nil { + c := condition.UnknownCondition(condition.TopologyReadyCondition, condition.InitReason, condition.TopologyReadyInitMessage) + cl.Set(c) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, instance, helper) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *CloudKittyAPIReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + Log := r.GetLogger(ctx) + + // Watch for changes to secrets we don't own. Global secrets + // (e.g. TransportURLSecret) are handled by the main cloudkitty controller. 
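+ // secretFn maps a changed Secret to the CloudKittyAPI CRs that consume it: secrets carrying the owning CloudKitty label, any CustomServiceConfigSecrets, the client cert secret and the prometheus endpoint secret.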
+ secretFn := func(_ context.Context, o client.Object) []reconcile.Request { + var namespace = o.GetNamespace() + var secretName = o.GetName() + result := []reconcile.Request{} + + // get all API CRs + apis := &telemetryv1.CloudKittyAPIList{} + listOpts := []client.ListOption{ + client.InNamespace(namespace), + } + if err := r.List(context.Background(), apis, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve API CRs %v") + return nil + } + + // Watch for changes to secrets where the owner label AND the + // CR.Spec.ManagingCrName label matches + label := o.GetLabels() + if l, ok := label[labels.GetOwnerNameLabelSelector(labels.GetGroupLabel(cloudkitty.ServiceName))]; ok { + for _, cr := range apis.Items { + // return reconcile event for the CR where the owner label AND the parentCloudKittyName matches + if l == cloudkitty.GetOwningCloudKittyName(&cr) { + // return namespace and Name of CR + name := client.ObjectKey{ + Namespace: o.GetNamespace(), + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Secret %s and CR %s marked with label: %s", o.GetName(), cr.Name, l)) + + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + } + + // Watch for changes to any CustomServiceConfigSecrets + for _, cr := range apis.Items { + for _, v := range cr.Spec.CustomServiceConfigSecrets { + if v == secretName { + name := client.ObjectKey{ + Namespace: namespace, + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Secret %s is used by CloudKitty CR %s", secretName, cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + } + + // Watch for changes to the client cert secret + if secretName == cloudkitty.ClientCertSecretName { + for _, cr := range apis.Items { + name := client.ObjectKey{ + Namespace: namespace, + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Secret %s is used by CloudKittyAPI CR %s", secretName, cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + + // Watch for changes to the prometheus secret + if secretName == cloudkitty.PrometheusEndpointSecret { + for _, cr := range apis.Items { + // Get the parent CloudKitty name + parentCloudKittyName := cloudkitty.GetOwningCloudKittyName(&cr) + + // Fetch the parent CloudKitty instance + parentCloudKitty := &telemetryv1.CloudKitty{} + _ = r.Get(ctx, types.NamespacedName{ + Name: parentCloudKittyName, + Namespace: cr.Namespace, + }, parentCloudKitty) + + // Only return a reconcile event if we are using the prometheus secret + if parentCloudKitty.Spec.PrometheusHost == "" { + name := client.ObjectKey{ + Namespace: namespace, + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Secret %s is used by CloudKittyAPI CR %s", secretName, cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + } + + if len(result) > 0 { + return result + } + return nil + } + + // Watch for changes to configmaps we don't own. 
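+ // configMapFn maps a changed ConfigMap to the CloudKittyAPI CRs that reference it; only the "<owning CloudKitty>-lokistack-gateway-ca-bundle" ConfigMap is matched.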
+ configMapFn := func(_ context.Context, o client.Object) []reconcile.Request { + var namespace = o.GetNamespace() + var configMapName = o.GetName() + result := []reconcile.Request{} + + // get all API CRs + apis := &telemetryv1.CloudKittyAPIList{} + listOpts := []client.ListOption{ + client.InNamespace(namespace), + } + if err := r.List(context.Background(), apis, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve API CRs %v") + return nil + } + + // Watch for changes to the ca cert config map + for _, cr := range apis.Items { + if configMapName == fmt.Sprintf("%s-lokistack-gateway-ca-bundle", cloudkitty.GetOwningCloudKittyName(&cr)) { + name := client.ObjectKey{ + Namespace: namespace, + Name: cr.Name, + } + Log.Info(fmt.Sprintf("ConfigMap %s is used by CloudKittyAPI CR %s", configMapName, cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + if len(result) > 0 { + return result + } + return nil + } + + // index passwordSecretField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &telemetryv1.CloudKittyAPI{}, cloudKittyPasswordSecretField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*telemetryv1.CloudKittyAPI) + if cr.Spec.Secret == "" { + return nil + } + return []string{cr.Spec.Secret} + }); err != nil { + return err + } + + // index caBundleSecretNameField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &telemetryv1.CloudKittyAPI{}, cloudKittyCaBundleSecretNameField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*telemetryv1.CloudKittyAPI) + if cr.Spec.TLS.CaBundleSecretName == "" { + return nil + } + return []string{cr.Spec.TLS.CaBundleSecretName} + }); err != nil { + return err + } + + // index tlsAPIInternalField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &telemetryv1.CloudKittyAPI{}, cloudKittyTLSAPIInternalField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*telemetryv1.CloudKittyAPI) + if cr.Spec.TLS.API.Internal.SecretName == nil { + return nil + } + return []string{*cr.Spec.TLS.API.Internal.SecretName} + }); err != nil { + return err + } + + // index tlsAPIPublicField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &telemetryv1.CloudKittyAPI{}, cloudKittyTLSAPIPublicField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*telemetryv1.CloudKittyAPI) + if cr.Spec.TLS.API.Public.SecretName == nil { + return nil + } + return []string{*cr.Spec.TLS.API.Public.SecretName} + }); err != nil { + return err + } + + // index topologyField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &telemetryv1.CloudKittyAPI{}, cloudKittyTopologyField, func(rawObj client.Object) []string { + // Extract the topology name from the spec, if one is provided + cr := rawObj.(*telemetryv1.CloudKittyAPI) + if cr.Spec.TopologyRef == nil { + return nil + } + return []string{cr.Spec.TopologyRef.Name} + }); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&telemetryv1.CloudKittyAPI{}). + Owns(&keystonev1.KeystoneService{}). + Owns(&keystonev1.KeystoneEndpoint{}). + Owns(&appsv1.StatefulSet{}). + Owns(&corev1.Service{}). + // watch the secrets we don't own + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(secretFn)). 
+ Watches( + &corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(r.findObjectsForSrc), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches(&corev1.ConfigMap{}, + handler.EnqueueRequestsFromMapFunc(configMapFn)). + Watches(&topologyv1.Topology{}, + handler.EnqueueRequestsFromMapFunc(r.findObjectsForSrc), + builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Complete(r) +} + +func (r *CloudKittyAPIReconciler) findObjectsForSrc(ctx context.Context, src client.Object) []reconcile.Request { + requests := []reconcile.Request{} + + l := log.FromContext(ctx).WithName("Controllers").WithName("CloudKittyAPI") + + for _, field := range cloudKittyAPIWatchFields { + crList := &telemetryv1.CloudKittyAPIList{} + listOps := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector(field, src.GetName()), + Namespace: src.GetNamespace(), + } + err := r.List(ctx, crList, listOps) + if err != nil { + l.Error(err, fmt.Sprintf("listing %s for field: %s - %s", crList.GroupVersionKind().Kind, field, src.GetNamespace())) + return requests + } + + for _, item := range crList.Items { + l.Info(fmt.Sprintf("input source %s changed, reconcile: %s - %s", src.GetName(), item.GetName(), item.GetNamespace())) + + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: item.GetName(), + Namespace: item.GetNamespace(), + }, + }, + ) + } + } + + return requests +} + +func (r *CloudKittyAPIReconciler) reconcileDelete(ctx context.Context, instance *telemetryv1.CloudKittyAPI, helper *helper.Helper) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + + Log.Info(fmt.Sprintf("Reconciling Service '%s' delete", instance.Name)) + + // It's possible to get here before the endpoints have been set in the status, so check for this + if instance.Status.APIEndpoints != nil { + for _, ksSvc := range keystoneServices { + + // Remove the finalizer from our KeystoneEndpoint CR + keystoneEndpoint, err := keystonev1.GetKeystoneEndpointWithName(ctx, helper, ksSvc["name"], instance.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + + if err == nil { + controllerutil.RemoveFinalizer(keystoneEndpoint, helper.GetFinalizer()) + if err = helper.GetClient().Update(ctx, keystoneEndpoint); err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + util.LogForObject(helper, "Removed finalizer from our KeystoneEndpoint", instance) + } + + // Remove the finalizer from our KeystoneService CR + keystoneService, err := keystonev1.GetKeystoneServiceWithName(ctx, helper, ksSvc["name"], instance.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + + if err == nil { + controllerutil.RemoveFinalizer(keystoneService, helper.GetFinalizer()) + if err = helper.GetClient().Update(ctx, keystoneService); err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + util.LogForObject(helper, "Removed finalizer from our KeystoneService", instance) + } + } + } + + // Remove finalizer on the Topology CR + if ctrlResult, err := topologyv1.EnsureDeletedTopologyRef( + ctx, + helper, + instance.Status.LastAppliedTopology, + instance.Name, + ); err != nil { + return ctrlResult, err + } + + // Service is deleted so remove the finalizer. 
+ controllerutil.RemoveFinalizer(instance, helper.GetFinalizer()) + Log.Info(fmt.Sprintf("Reconciled Service '%s' delete successfully", instance.Name)) + + return ctrl.Result{}, nil +} + +func (r *CloudKittyAPIReconciler) reconcileInit( + ctx context.Context, + instance *telemetryv1.CloudKittyAPI, + helper *helper.Helper, + serviceLabels map[string]string, +) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + + Log.Info(fmt.Sprintf("Reconciling Service '%s' init", instance.Name)) + + // + // expose the service (create service and return the created endpoint URLs) + // + + publicEndpointData := endpoint.Data{ + Port: cloudkitty.CloudKittyPublicPort, + Path: "", + } + internalEndpointData := endpoint.Data{ + Port: cloudkitty.CloudKittyInternalPort, + Path: "", + } + cloudkittyEndpoints := map[service.Endpoint]endpoint.Data{ + service.EndpointPublic: publicEndpointData, + service.EndpointInternal: internalEndpointData, + } + + apiEndpoints := make(map[string]string) + + for endpointType, data := range cloudkittyEndpoints { + endpointTypeStr := string(endpointType) + endpointName := cloudkitty.ServiceName + "-" + endpointTypeStr + svcOverride := instance.Spec.Override.Service[endpointType] + if svcOverride.EmbeddedLabelsAnnotations == nil { + svcOverride.EmbeddedLabelsAnnotations = &service.EmbeddedLabelsAnnotations{} + } + + exportLabels := util.MergeStringMaps( + serviceLabels, + map[string]string{ + service.AnnotationEndpointKey: endpointTypeStr, + }, + ) + + // Create the service + svc, err := service.NewService( + service.GenericService(&service.GenericServiceDetails{ + Name: endpointName, + Namespace: instance.Namespace, + Labels: exportLabels, + Selector: serviceLabels, + Port: service.GenericServicePort{ + Name: endpointName, + Port: data.Port, + Protocol: corev1.ProtocolTCP, + }, + }), + 5, + &svcOverride.OverrideSpec, + ) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.CreateServiceReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.CreateServiceReadyErrorMessage, + err.Error())) + + return ctrl.Result{}, err + } + + svc.AddAnnotation(map[string]string{ + service.AnnotationEndpointKey: endpointTypeStr, + }) + + // add Annotation to whether creating an ingress is required or not + if endpointType == service.EndpointPublic && svc.GetServiceType() == corev1.ServiceTypeClusterIP { + svc.AddAnnotation(map[string]string{ + service.AnnotationIngressCreateKey: "true", + }) + } else { + svc.AddAnnotation(map[string]string{ + service.AnnotationIngressCreateKey: "false", + }) + if svc.GetServiceType() == corev1.ServiceTypeLoadBalancer { + svc.AddAnnotation(map[string]string{ + service.AnnotationHostnameKey: svc.GetServiceHostname(), // add annotation to register service name in dnsmasq + }) + } + } + + ctrlResult, err := svc.CreateOrPatch(ctx, helper) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.CreateServiceReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.CreateServiceReadyErrorMessage, + err.Error())) + + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.CreateServiceReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.CreateServiceReadyRunningMessage)) + return ctrlResult, nil + } + // create service - end + + // if TLS is enabled + if instance.Spec.TLS.API.Enabled(endpointType) { + // set endpoint protocol to https + 
data.Protocol = ptr.To(service.ProtocolHTTPS) + } + + apiEndpoints[string(endpointType)], err = svc.GetAPIEndpoint( + svcOverride.EndpointURL, data.Protocol, data.Path) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.CreateServiceReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.CreateServiceReadyErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + } + instance.Status.Conditions.MarkTrue(condition.CreateServiceReadyCondition, condition.CreateServiceReadyMessage) + + // + // Update instance status with service endpoint url from route host information + // + if instance.Status.APIEndpoints == nil { + instance.Status.APIEndpoints = map[string]map[string]string{} + } + instance.Status.APIEndpoints[cloudkitty.ServiceName] = apiEndpoints + // V2 - end + + // expose service - end + + // + // create service and user in keystone - - https://docs.openstack.org/CloudKitty/latest/install/install-rdo.html#configure-user-and-endpoints + // TODO: rework this + // + if instance.Status.ServiceIDs == nil { + instance.Status.ServiceIDs = map[string]string{} + } + + for _, ksSvc := range keystoneServices { + ksSvcSpec := keystonev1.KeystoneServiceSpec{ + ServiceType: ksSvc["type"], + ServiceName: ksSvc["name"], + ServiceDescription: ksSvc["desc"], + Enabled: true, + ServiceUser: instance.Spec.ServiceUser, + Secret: instance.Spec.Secret, + PasswordSelector: instance.Spec.PasswordSelectors.CloudKittyService, + } + + ksSvcObj := keystonev1.NewKeystoneService(ksSvcSpec, instance.Namespace, serviceLabels, cloudkitty.NormalDuration) + ctrlResult, err := ksSvcObj.CreateOrPatch(ctx, helper) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.KeystoneServiceReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + "Creating KeyStoneService CR %s", + err.Error()) + return ctrlResult, err + } + + // mirror the Status, Reason, Severity and Message of the latest keystoneservice condition + // into a local condition with the type condition.KeystoneServiceReadyCondition + c := ksSvcObj.GetConditions().Mirror(condition.KeystoneServiceReadyCondition) + if c != nil { + instance.Status.Conditions.Set(c) + } + + if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + instance.Status.ServiceIDs[ksSvc["name"]] = ksSvcObj.GetServiceID() + + ksEndptSpec := keystonev1.KeystoneEndpointSpec{ + ServiceName: ksSvc["name"], + Endpoints: instance.Status.APIEndpoints[ksSvc["name"]], + } + + ksEndptObj := keystonev1.NewKeystoneEndpoint( + ksSvc["name"], + instance.Namespace, + ksEndptSpec, + serviceLabels, + cloudkitty.NormalDuration) + ctrlResult, err = ksEndptObj.CreateOrPatch(ctx, helper) + if err != nil { + instance.Status.Conditions.MarkFalse( + condition.KeystoneEndpointReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + "Creating KeyStoneEndpoint CR %s", + err.Error()) + return ctrlResult, err + } + + // mirror the Status, Reason, Severity and Message of the latest keystoneendpoint condition + // into a local condition with the type condition.KeystoneEndpointReadyCondition + c = ksEndptObj.GetConditions().Mirror(condition.KeystoneEndpointReadyCondition) + if c != nil { + instance.Status.Conditions.Set(c) + } + + if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + } + + Log.Info(fmt.Sprintf("Reconciled Service '%s' init successfully", instance.Name)) + return ctrl.Result{}, nil +} + +func (r *CloudKittyAPIReconciler) reconcileNormal(ctx context.Context, instance *telemetryv1.CloudKittyAPI, helper 
*helper.Helper) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + + Log.Info(fmt.Sprintf("Reconciling Service '%s'", instance.Name)) + + configVars := make(map[string]env.Setter) + + // + // check for required OpenStack secret holding passwords for service/admin user and add hash to the vars map + // + + ctrlResult, err := cloudkitty.VerifyServiceSecret( + ctx, + types.NamespacedName{Namespace: instance.Namespace, Name: instance.Spec.Secret}, + []string{ + instance.Spec.PasswordSelectors.CloudKittyService, + }, + helper.GetClient(), + &instance.Status.Conditions, + cloudkitty.NormalDuration, + &configVars, + ) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + // + // check for required Transport URL and config secrets + // + + parentCloudKittyName := cloudkitty.GetOwningCloudKittyName(instance) + secretNames := []string{ + instance.Spec.TransportURLSecret, // TransportURLSecret + fmt.Sprintf("%s-scripts", parentCloudKittyName), // ScriptsSecret + fmt.Sprintf("%s-config-data", parentCloudKittyName), // ConfigSecret + } + // Append CustomServiceConfigSecrets that should be checked + secretNames = append(secretNames, instance.Spec.CustomServiceConfigSecrets...) + + ctrlResult, err = cloudkitty.VerifyConfigSecrets( + ctx, + helper, + &instance.Status.Conditions, + secretNames, + instance.Namespace, + &configVars, + ) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + instance.Status.Conditions.MarkTrue(condition.InputReadyCondition, condition.InputReadyMessage) + + // + // TLS input validation + // + // Validate the CA cert secret if provided + if instance.Spec.TLS.CaBundleSecretName != "" { + hash, err := tls.ValidateCACertSecret( + ctx, + helper.GetClient(), + types.NamespacedName{ + Name: instance.Spec.TLS.CaBundleSecretName, + Namespace: instance.Namespace, + }, + ) + if err != nil { + if k8s_errors.IsNotFound(err) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.TLSInputReadyWaitingMessage, + instance.Spec.TLS.CaBundleSecretName)) + return ctrl.Result{}, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TLSInputErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + if hash != "" { + configVars[tls.CABundleKey] = env.SetValue(hash) + } + } + + // Validate API service certs secrets + certsHash, err := instance.Spec.TLS.API.ValidateCertSecrets(ctx, helper, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.TLSInputReadyWaitingMessage, + err.Error())) + return ctrl.Result{}, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TLSInputErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + configVars[tls.TLSHashName] = env.SetValue(certsHash) + + // all cert input checks out so report InputReady + instance.Status.Conditions.MarkTrue(condition.TLSInputReadyCondition, condition.InputReadyMessage) + + // + // Create secrets required as input for the Service and calculate an overall hash of hashes + // + serviceLabels 
:= map[string]string{ + common.AppSelector: cloudkitty.ServiceName, + common.ComponentSelector: cloudkittyapi.ComponentName, + } + + // + // create custom config for this cloudkitty service + // + err = r.generateServiceConfigs(ctx, helper, instance, &configVars, serviceLabels) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + instance.Status.Conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage) + + // + // TODO check when/if Init, Update, or Upgrade should/could be skipped + // + + // networks to attach to + nadList := []networkv1.NetworkAttachmentDefinition{} + for _, netAtt := range instance.Spec.NetworkAttachments { + nad, err := nad.GetNADWithName(ctx, helper, netAtt, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + Log.Info(fmt.Sprintf("network-attachment-definition %s not found", netAtt)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.NetworkAttachmentsReadyWaitingMessage, + netAtt)) + //nolint:err113 // Dynamic error message with network attachment name + return cloudkitty.ResultRequeue, fmt.Errorf("network-attachment-definition %s not found", netAtt) + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + if nad != nil { + nadList = append(nadList, *nad) + } + } + + serviceAnnotations, err := nad.EnsureNetworksAnnotation(nadList) + if err != nil { + err = fmt.Errorf("failed create network annotation from %s: %w", instance.Spec.NetworkAttachments, err) + instance.Status.Conditions.MarkFalse( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + + // Handle service init + ctrlResult, err = r.reconcileInit(ctx, instance, helper, serviceLabels) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + // + // Handle Topology + // + topology, err := ensureTopology( + ctx, + helper, + instance, // topologyHandler + instance.Name, // finalizer + &instance.Status.Conditions, + labels.GetLabelSelector(serviceLabels), + ) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TopologyReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TopologyReadyErrorMessage, + err.Error())) + return ctrl.Result{}, fmt.Errorf("waiting for Topology requirements: %w", err) + } + + // + // normal reconcile tasks + // + + // Add client cert secret hash to all the other config data, so that + // restart is triggered on certificate changes (e.g. 
when they + // rotate) + _, clientCertHash, err := secret.GetSecret( + ctx, + helper, + cloudkitty.ClientCertSecretName, + instance.Namespace, + ) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + configVars["client-cert"] = env.SetValue(clientCertHash) + + // + // create hash over all the different input resources to identify if any those changed + // and a restart/recreate is required. + // + inputHash, hashChanged, err := r.createHashOfInputHashes(ctx, instance, configVars) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } else if hashChanged { + Log.Info(fmt.Sprintf("%s... requeueing", condition.ServiceConfigReadyInitMessage)) + instance.Status.Conditions.MarkFalse( + condition.ServiceConfigReadyCondition, + condition.InitReason, + condition.SeverityInfo, + condition.ServiceConfigReadyInitMessage) + // Hash changed and instance status should be updated (which will be done by main defer func), + // so we need to return and reconcile again + return ctrl.Result{}, nil + } + + // Deploy a statefulset + ssDef, err := cloudkittyapi.StatefulSet(instance, inputHash, serviceLabels, serviceAnnotations, topology) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DeploymentReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + ss := statefulset.NewStatefulSet(ssDef, cloudkitty.ShortDuration) + + var ssData appsv1.StatefulSet + ctrlResult, err = ss.CreateOrPatch(ctx, helper) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DeploymentReadyErrorMessage, + err.Error())) + return ctrlResult, err + + } else if (ctrlResult == ctrl.Result{}) { + // Wait until the data in the StatefulSet is for the current generation + ssData = ss.GetStatefulSet() + if ssData.Generation != ssData.Status.ObservedGeneration { + ctrlResult = cloudkitty.ResultRequeue + //nolint:err113 // Dynamic error message with statefulset name + err = fmt.Errorf("waiting for Statefulset %s to start reconciling", ssData.Name) + } + } + + if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyRunningMessage)) + // If the deployment is not ready, then neither are the NADs + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.NetworkAttachmentsReadyInitMessage)) + return ctrlResult, err + } + + instance.Status.ReadyCount = ssData.Status.ReadyReplicas + + // verify if network attachment matches expectations + networkReady := false + networkAttachmentStatus := map[string][]string{} + if *instance.Spec.Replicas > 0 { + networkReady, networkAttachmentStatus, err = nad.VerifyNetworkStatusFromAnnotation( + ctx, + helper, + instance.Spec.NetworkAttachments, + serviceLabels, + 
instance.Status.ReadyCount, + ) + if err != nil { + err = fmt.Errorf("verifying API NetworkAttachments (%s) %w", instance.Spec.NetworkAttachments, err) + instance.Status.Conditions.MarkFalse( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + } else { + networkReady = true + } + + instance.Status.NetworkAttachments = networkAttachmentStatus + if networkReady { + instance.Status.Conditions.MarkTrue(condition.NetworkAttachmentsReadyCondition, condition.NetworkAttachmentsReadyMessage) + } else { + //nolint:err113 // Dynamic error message with network attachments + err := fmt.Errorf("not all pods have interfaces with ips as configured in NetworkAttachments: %s", instance.Spec.NetworkAttachments) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error())) + + return ctrl.Result{}, err + } + + if instance.Status.ReadyCount > 0 { + instance.Status.Conditions.MarkTrue(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage) + + } else if *instance.Spec.Replicas > 0 { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyRunningMessage)) + + } else { + instance.Status.Conditions.MarkFalse( + condition.DeploymentReadyCondition, + condition.NotRequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyInitMessage) + } + // create StatefulSet - end + + Log.Info(fmt.Sprintf("Reconciled Service '%s' successfully", instance.Name)) + // update the overall status condition if service is ready + if instance.IsReady() { + instance.Status.Conditions.MarkTrue(condition.ReadyCondition, condition.ReadyMessage) + } + // For non ready we'll let the main defer func handle the status update using the Mirror function + return ctrl.Result{}, nil +} + +// generateServiceConfigs - create Secret which holds the service configuration +func (r *CloudKittyAPIReconciler) generateServiceConfigs( + ctx context.Context, + h *helper.Helper, + instance *telemetryv1.CloudKittyAPI, + envVars *map[string]env.Setter, + serviceLabels map[string]string, +) error { + // + // create custom Secret for cloudkitty service-specific config input + // - %-config-data holds custom config for the service + // + + labels := labels.GetLabels(instance, labels.GetGroupLabel(cloudkitty.ServiceName), serviceLabels) + + // customData hold any customization for the service. + customData := map[string]string{cloudkitty.CustomServiceConfigFileName: instance.Spec.CustomServiceConfig} + + // Fetch the two service config snippets (DefaultsConfigFileName and + // CustomConfigFileName) from the Secret generated by the top level + // cloudkitty controller, and add them to this service specific Secret. 
+ cloudkittySecretName := cloudkitty.GetOwningCloudKittyName(instance) + "-config-data" + cloudkittySecret, _, err := secret.GetSecret(ctx, h, cloudkittySecretName, instance.Namespace) + if err != nil { + return err + } + customData[cloudkitty.DefaultsConfigFileName] = string(cloudkittySecret.Data[cloudkitty.DefaultsConfigFileName]) + customData[cloudkitty.CustomConfigFileName] = string(cloudkittySecret.Data[cloudkitty.CustomConfigFileName]) + + customSecrets := "" + for _, secretName := range instance.Spec.CustomServiceConfigSecrets { + secret, _, err := secret.GetSecret(ctx, h, secretName, instance.Namespace) + if err != nil { + return err + } + for _, data := range secret.Data { + customSecrets += string(data) + "\n" + } + } + customData[cloudkitty.CustomServiceConfigSecretsFileName] = customSecrets + + templateParameters := map[string]interface{}{ + "LogFile": cloudkittyapi.LogFile, + } + + configTemplates := []util.Template{ + { + Name: fmt.Sprintf("%s-config-data", instance.Name), + Namespace: instance.Namespace, + Type: util.TemplateTypeConfig, + InstanceType: instance.Kind, + CustomData: customData, + ConfigOptions: templateParameters, + Labels: labels, + }, + } + + return secret.EnsureSecrets(ctx, h, instance, configTemplates, envVars) +} + +// createHashOfInputHashes - creates a hash of hashes which gets added to the resources which requires a restart +// if any of the input resources change, like configs, passwords, ... +// +// returns the hash, whether the hash changed (as a bool) and any error +func (r *CloudKittyAPIReconciler) createHashOfInputHashes( + ctx context.Context, + instance *telemetryv1.CloudKittyAPI, + envVars map[string]env.Setter, +) (string, bool, error) { + Log := r.GetLogger(ctx) + + var hashMap map[string]string + changed := false + mergedMapVars := env.MergeEnvs([]corev1.EnvVar{}, envVars) + hash, err := util.ObjectHash(mergedMapVars) + if err != nil { + return hash, changed, err + } + if hashMap, changed = util.SetHash(instance.Status.Hash, common.InputHashName, hash); changed { + instance.Status.Hash = hashMap + Log.Info(fmt.Sprintf("Input maps hash %s - %s", common.InputHashName, hash)) + } + return hash, changed, nil +} diff --git a/controllers/cloudkittyproc_controller.go b/controllers/cloudkittyproc_controller.go new file mode 100644 index 00000000..7eb5df36 --- /dev/null +++ b/controllers/cloudkittyproc_controller.go @@ -0,0 +1,860 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "fmt" + + telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1" + "github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkitty" + "github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkittyproc" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/go-logr/logr" + networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + "github.com/openstack-k8s-operators/lib-common/modules/common/labels" + nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + "github.com/openstack-k8s-operators/lib-common/modules/common/statefulset" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" +) + +// GetClient - +func (r *CloudKittyProcReconciler) GetClient() client.Client { + return r.Client +} + +// GetKClient - +func (r *CloudKittyProcReconciler) GetKClient() kubernetes.Interface { + return r.Kclient +} + +// GetScheme - +func (r *CloudKittyProcReconciler) GetScheme() *runtime.Scheme { + return r.Scheme +} + +// CloudKittyProcReconciler reconciles a CloudKittyProc object +type CloudKittyProcReconciler struct { + client.Client + Kclient kubernetes.Interface + Scheme *runtime.Scheme +} + +// GetLogger returns a logger object with a logging prefix of "controller.name" and additional controller context fields +func (r *CloudKittyProcReconciler) GetLogger(ctx context.Context) logr.Logger { + return log.FromContext(ctx).WithName("Controllers").WithName("CloudKittyProc") +} + +//+kubebuilder:rbac:groups=cloudkitty.openstack.org,resources=cloudkittyprocs,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=cloudkitty.openstack.org,resources=cloudkittyprocs/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=cloudkitty.openstack.org,resources=cloudkittyprocs/finalizers,verbs=update;patch +// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list; +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;create;update;patch;delete;watch +// +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch +// +kubebuilder:rbac:groups=topology.openstack.org,resources=topologies,verbs=get;list;watch;update + +// Reconcile - +func (r 
*CloudKittyProcReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + Log := r.GetLogger(ctx) + + // Fetch the CloudKittyProc instance + instance := &telemetryv1.CloudKittyProc{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. Return and don't requeue. + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + return ctrl.Result{}, err + } + + helper, err := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + Log, + ) + if err != nil { + return ctrl.Result{}, err + } + + // + // initialize status + // + isNewInstance := instance.Status.Conditions == nil + if isNewInstance { + instance.Status.Conditions = condition.Conditions{} + } + + // Save a copy of the conditions so that we can restore the LastTransitionTime + // when a condition's state doesn't change. + savedConditions := instance.Status.Conditions.DeepCopy() + + // Always patch the instance status when exiting this function so we can persist any changes. + defer func() { + // Don't update the status, if reconciler Panics + if r := recover(); r != nil { + Log.Info(fmt.Sprintf("panic during reconcile %v\n", r)) + panic(r) + } + condition.RestoreLastTransitionTimes(&instance.Status.Conditions, savedConditions) + if instance.Status.Conditions.IsUnknown(condition.ReadyCondition) { + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + err := helper.PatchInstance(ctx, instance) + if err != nil { + _err = err + return + } + }() + + // Always initialize conditions used later as Status=Unknown + cl := condition.CreateList( + condition.UnknownCondition(condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage), + condition.UnknownCondition(condition.InputReadyCondition, condition.InitReason, condition.InputReadyInitMessage), + condition.UnknownCondition(condition.ServiceConfigReadyCondition, condition.InitReason, condition.ServiceConfigReadyInitMessage), + condition.UnknownCondition(condition.DeploymentReadyCondition, condition.InitReason, condition.DeploymentReadyInitMessage), + condition.UnknownCondition(condition.NetworkAttachmentsReadyCondition, condition.InitReason, condition.NetworkAttachmentsReadyInitMessage), + condition.UnknownCondition(condition.TLSInputReadyCondition, condition.InitReason, condition.InputReadyInitMessage), + ) + instance.Status.Conditions.Init(&cl) + // Always mark the Generation as observed early on + instance.Status.ObservedGeneration = instance.Generation + + // If we're not deleting this and the service object doesn't have our finalizer, add it. + if (instance.DeletionTimestamp.IsZero() && controllerutil.AddFinalizer(instance, helper.GetFinalizer())) || isNewInstance { + // Register overall status immediately to have an early feedback e.g.
in the cli + return ctrl.Result{}, nil + } + + if instance.Status.Hash == nil { + instance.Status.Hash = map[string]string{} + } + if instance.Status.NetworkAttachments == nil { + instance.Status.NetworkAttachments = map[string][]string{} + } + + // Handle service delete + if !instance.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, instance, helper) + } + + // Init Topology condition if there's a reference + if instance.Spec.TopologyRef != nil { + c := condition.UnknownCondition(condition.TopologyReadyCondition, condition.InitReason, condition.TopologyReadyInitMessage) + cl.Set(c) + } + + // Handle non-deleted clusters + return r.reconcileNormal(ctx, instance, helper) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *CloudKittyProcReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + Log := r.GetLogger(ctx) + + // Watch for changes to secrets we don't own. Global secrets + // (e.g. TransportURLSecret) are handled by the main cloudkitty controller. + secretFn := func(_ context.Context, o client.Object) []reconcile.Request { + var namespace = o.GetNamespace() + var secretName = o.GetName() + result := []reconcile.Request{} + + // get all CloudKittyProc CRs + cloudKittyProcs := &telemetryv1.CloudKittyProcList{} + listOpts := []client.ListOption{ + client.InNamespace(namespace), + } + if err := r.List(context.Background(), cloudKittyProcs, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve scheduler CRs %v") + return nil + } + + // Watch for changes to secrets where the owner label AND the + // CR.Spec.ManagingCrName label matches + label := o.GetLabels() + if l, ok := label[labels.GetOwnerNameLabelSelector(labels.GetGroupLabel(cloudkitty.ServiceName))]; ok { + for _, cr := range cloudKittyProcs.Items { + // return reconcile event for the CR where the owner label AND the parentCloudKittyName matches + if l == cloudkitty.GetOwningCloudKittyName(&cr) { + // return namespace and Name of CR + name := client.ObjectKey{ + Namespace: o.GetNamespace(), + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Secret %s and CR %s marked with label: %s", o.GetName(), cr.Name, l)) + + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + } + + // Watch for changes to any CustomServiceConfigSecrets + for _, cr := range cloudKittyProcs.Items { + for _, v := range cr.Spec.CustomServiceConfigSecrets { + if v == secretName { + name := client.ObjectKey{ + Namespace: namespace, + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Secret %s is used by CloudKitty CR %s", secretName, cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + } + + // Watch for changes to the client cert secret + if secretName == cloudkitty.ClientCertSecretName { + for _, cr := range cloudKittyProcs.Items { + name := client.ObjectKey{ + Namespace: namespace, + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Secret %s is used by CloudKittyProc CR %s", secretName, cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + + // Watch for changes to the prometheus secret + if secretName == cloudkitty.PrometheusEndpointSecret { + for _, cr := range cloudKittyProcs.Items { + // Get the parent CloudKitty name + parentCloudKittyName := cloudkitty.GetOwningCloudKittyName(&cr) + + // Fetch the parent CloudKitty instance + parentCloudKitty := &telemetryv1.CloudKitty{} + _ = r.Get(ctx, types.NamespacedName{ + Name: parentCloudKittyName, + Namespace: cr.Namespace, + }, parentCloudKitty) + + // Only return a reconcile 
event if we are using the prometheus secret + if parentCloudKitty.Spec.PrometheusHost == "" { + name := client.ObjectKey{ + Namespace: namespace, + Name: cr.Name, + } + Log.Info(fmt.Sprintf("Secret %s is used by CloudKittyProc CR %s", secretName, cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + } + + if len(result) > 0 { + return result + } + return nil + } + + // Watch for changes to configmaps we don't own. + configMapFn := func(_ context.Context, o client.Object) []reconcile.Request { + var namespace = o.GetNamespace() + var configMapName = o.GetName() + result := []reconcile.Request{} + + // get all Proc CRs + procs := &telemetryv1.CloudKittyProcList{} + listOpts := []client.ListOption{ + client.InNamespace(namespace), + } + if err := r.List(context.Background(), procs, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve Proc CRs %v") + return nil + } + + // Watch for changes to the ca cert config map + for _, cr := range procs.Items { + if configMapName == fmt.Sprintf("%s-lokistack-gateway-ca-bundle", cloudkitty.GetOwningCloudKittyName(&cr)) { + name := client.ObjectKey{ + Namespace: namespace, + Name: cr.Name, + } + Log.Info(fmt.Sprintf("ConfigMap %s is used by CloudKittyProc CR %s", configMapName, cr.Name)) + result = append(result, reconcile.Request{NamespacedName: name}) + } + } + if len(result) > 0 { + return result + } + return nil + } + + // index passwordSecretField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &telemetryv1.CloudKittyProc{}, cloudKittyPasswordSecretField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*telemetryv1.CloudKittyProc) + if cr.Spec.Secret == "" { + return nil + } + return []string{cr.Spec.Secret} + }); err != nil { + return err + } + + // index caBundleSecretNameField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &telemetryv1.CloudKittyProc{}, cloudKittyCaBundleSecretNameField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*telemetryv1.CloudKittyProc) + if cr.Spec.TLS.CaBundleSecretName == "" { + return nil + } + return []string{cr.Spec.TLS.CaBundleSecretName} + }); err != nil { + return err + } + + // index topologyField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &telemetryv1.CloudKittyProc{}, cloudKittyTopologyField, func(rawObj client.Object) []string { + // Extract the topology name from the spec, if one is provided + cr := rawObj.(*telemetryv1.CloudKittyProc) + if cr.Spec.TopologyRef == nil { + return nil + } + return []string{cr.Spec.TopologyRef.Name} + }); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&telemetryv1.CloudKittyProc{}). + Owns(&appsv1.StatefulSet{}). + // watch the secrets we don't own + Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(secretFn)). + Watches( + &corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(r.findObjectsForSrc), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches(&corev1.ConfigMap{}, + handler.EnqueueRequestsFromMapFunc(configMapFn)). + Watches(&topologyv1.Topology{}, + handler.EnqueueRequestsFromMapFunc(r.findObjectsForSrc), + builder.WithPredicates(predicate.GenerationChangedPredicate{})). 
+ Complete(r) +} + +func (r *CloudKittyProcReconciler) findObjectsForSrc(ctx context.Context, src client.Object) []reconcile.Request { + requests := []reconcile.Request{} + + l := log.FromContext(ctx).WithName("Controllers").WithName("CloudKittyProc") + + for _, field := range cloudKittyProcWatchFields { + crList := &telemetryv1.CloudKittyProcList{} + listOps := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector(field, src.GetName()), + Namespace: src.GetNamespace(), + } + err := r.List(ctx, crList, listOps) + if err != nil { + l.Error(err, fmt.Sprintf("listing %s for field: %s - %s", crList.GroupVersionKind().Kind, field, src.GetNamespace())) + return requests + } + + for _, item := range crList.Items { + l.Info(fmt.Sprintf("input source %s changed, reconcile: %s - %s", src.GetName(), item.GetName(), item.GetNamespace())) + + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: item.GetName(), + Namespace: item.GetNamespace(), + }, + }, + ) + } + } + + return requests +} + +func (r *CloudKittyProcReconciler) reconcileDelete(ctx context.Context, instance *telemetryv1.CloudKittyProc, helper *helper.Helper) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + + Log.Info(fmt.Sprintf("Reconciling Service '%s' delete", instance.Name)) + + // Service is deleted so remove the finalizer. + controllerutil.RemoveFinalizer(instance, helper.GetFinalizer()) + Log.Info(fmt.Sprintf("Reconciled Service '%s' delete successfully", instance.Name)) + + // Remove finalizer on the Topology CR + if ctrlResult, err := topologyv1.EnsureDeletedTopologyRef( + ctx, + helper, + instance.Status.LastAppliedTopology, + instance.Name, + ); err != nil { + return ctrlResult, err + } + return ctrl.Result{}, nil +} + +func (r *CloudKittyProcReconciler) reconcileNormal(ctx context.Context, instance *telemetryv1.CloudKittyProc, helper *helper.Helper) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + + Log.Info(fmt.Sprintf("Reconciling Service '%s'", instance.Name)) + + configVars := make(map[string]env.Setter) + + // + // check for required OpenStack secret holding passwords for service/admin user and add hash to the vars map + // + + ctrlResult, err := cloudkitty.VerifyServiceSecret( + ctx, + types.NamespacedName{Namespace: instance.Namespace, Name: instance.Spec.Secret}, + []string{ + instance.Spec.PasswordSelectors.CloudKittyService, + }, + helper.GetClient(), + &instance.Status.Conditions, + cloudkitty.NormalDuration, + &configVars, + ) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + // + // check for required Transport URL and config secrets + // + + parentCloudKittyName := cloudkitty.GetOwningCloudKittyName(instance) + secretNames := []string{ + instance.Spec.TransportURLSecret, // TransportURLSecret + fmt.Sprintf("%s-scripts", parentCloudKittyName), // ScriptsSecret + fmt.Sprintf("%s-config-data", parentCloudKittyName), // ConfigSecret + } + // Append CustomServiceConfigSecrets that should be checked + secretNames = append(secretNames, instance.Spec.CustomServiceConfigSecrets...) 
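+ // Make sure the transport URL, scripts, config-data and any custom config secrets are present before generating the service config.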
+ + ctrlResult, err = cloudkitty.VerifyConfigSecrets( + ctx, + helper, + &instance.Status.Conditions, + secretNames, + instance.Namespace, + &configVars, + ) + if err != nil { + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + instance.Status.Conditions.MarkTrue(condition.InputReadyCondition, condition.InputReadyMessage) + + // + // TLS input validation + // + // Validate the CA cert secret if provided + if instance.Spec.TLS.CaBundleSecretName != "" { + hash, err := tls.ValidateCACertSecret( + ctx, + helper.GetClient(), + types.NamespacedName{ + Name: instance.Spec.TLS.CaBundleSecretName, + Namespace: instance.Namespace, + }, + ) + if err != nil { + if k8s_errors.IsNotFound(err) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.TLSInputReadyWaitingMessage, + instance.Spec.TLS.CaBundleSecretName)) + return ctrl.Result{}, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TLSInputErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + if hash != "" { + configVars[tls.CABundleKey] = env.SetValue(hash) + } + } + // all cert input checks out so report InputReady + instance.Status.Conditions.MarkTrue(condition.TLSInputReadyCondition, condition.InputReadyMessage) + + // + // Create ConfigMaps required as input for the Service and calculate an overall hash of hashes + // + serviceLabels := map[string]string{ + common.AppSelector: cloudkitty.ServiceName, + common.ComponentSelector: cloudkittyproc.ComponentName, + } + + // + // create custom config for this cloudkitty service + // + err = r.generateServiceConfigs(ctx, helper, instance, &configVars, serviceLabels) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + // Add client cert secret hash to all the other config data, so that + // restart is triggered on certificate changes (e.g. when they + // rotate) + _, clientCertHash, err := secret.GetSecret( + ctx, + helper, + cloudkitty.ClientCertSecretName, + instance.Namespace, + ) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + configVars["client-cert"] = env.SetValue(clientCertHash) + + // + // create hash over all the different input resources to identify if any those changed + // and a restart/recreate is required. + // + inputHash, hashChanged, err := r.createHashOfInputHashes(ctx, instance, configVars) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } else if hashChanged { + Log.Info(fmt.Sprintf("%s... 
requeueing", condition.ServiceConfigReadyInitMessage)) + instance.Status.Conditions.MarkFalse( + condition.ServiceConfigReadyCondition, + condition.InitReason, + condition.SeverityInfo, + condition.ServiceConfigReadyInitMessage) + // Hash changed and instance status should be updated (which will be done by main defer func), + // so we need to return and reconcile again + return ctrl.Result{}, nil + } + instance.Status.Conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage) + + // + // TODO check when/if Init, Update, or Upgrade should/could be skipped + // + + // networks to attach to + nadList := []networkv1.NetworkAttachmentDefinition{} + for _, netAtt := range instance.Spec.NetworkAttachments { + nad, err := nad.GetNADWithName(ctx, helper, netAtt, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + Log.Info(fmt.Sprintf("network-attachment-definition %s not found", netAtt)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.NetworkAttachmentsReadyWaitingMessage, + netAtt)) + //nolint:err113 // Dynamic error message with network attachment name + return cloudkitty.ResultRequeue, fmt.Errorf("network-attachment-definition %s not found", netAtt) + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + if nad != nil { + nadList = append(nadList, *nad) + } + } + + serviceAnnotations, err := nad.EnsureNetworksAnnotation(nadList) + if err != nil { + err = fmt.Errorf("failed create network annotation from %s: %w", instance.Spec.NetworkAttachments, err) + instance.Status.Conditions.MarkFalse( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + + // + // normal reconcile tasks + // + + // + // Handle Topology + // + topology, err := ensureTopology( + ctx, + helper, + instance, // topologyHandler + instance.Name, // finalizer + &instance.Status.Conditions, + labels.GetLabelSelector(serviceLabels), + ) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TopologyReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TopologyReadyErrorMessage, + err.Error())) + return ctrl.Result{}, fmt.Errorf("waiting for Topology requirements: %w", err) + } + + // Deploy a statefulset + ssDef := cloudkittyproc.StatefulSet(instance, inputHash, serviceLabels, serviceAnnotations, topology) + ss := statefulset.NewStatefulSet(ssDef, cloudkitty.ShortDuration) + + var ssData appsv1.StatefulSet + ctrlResult, err = ss.CreateOrPatch(ctx, helper) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DeploymentReadyErrorMessage, + err.Error())) + return ctrlResult, err + + } else if (ctrlResult == ctrl.Result{}) { + // Wait until the data in the StatefulSet is for the current generation + ssData = ss.GetStatefulSet() + if ssData.Generation != ssData.Status.ObservedGeneration { + ctrlResult = cloudkitty.ResultRequeue + //nolint:err113 // Dynamic error message with statefulset name + err = fmt.Errorf("waiting for 
Statefulset %s to start reconciling", ssData.Name) + } + } + + if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyRunningMessage)) + // If the deployment is not ready, then neither are the NADs + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.NetworkAttachmentsReadyInitMessage)) + return ctrlResult, err + } + + instance.Status.ReadyCount = ssData.Status.ReadyReplicas + + // verify if network attachment matches expectations + networkReady := false + networkAttachmentStatus := map[string][]string{} + if *instance.Spec.Replicas > 0 { + networkReady, networkAttachmentStatus, err = nad.VerifyNetworkStatusFromAnnotation( + ctx, + helper, + instance.Spec.NetworkAttachments, + serviceLabels, + instance.Status.ReadyCount, + ) + if err != nil { + err = fmt.Errorf("verifying API NetworkAttachments (%s) %w", instance.Spec.NetworkAttachments, err) + instance.Status.Conditions.MarkFalse( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error()) + return ctrl.Result{}, err + } + } else { + networkReady = true + } + + instance.Status.NetworkAttachments = networkAttachmentStatus + if networkReady { + instance.Status.Conditions.MarkTrue(condition.NetworkAttachmentsReadyCondition, condition.NetworkAttachmentsReadyMessage) + } else { + //nolint:err113 // Dynamic error message with network attachments + err := fmt.Errorf("not all pods have interfaces with ips as configured in NetworkAttachments: %s", instance.Spec.NetworkAttachments) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error())) + + return ctrl.Result{}, err + } + + if instance.Status.ReadyCount > 0 { + instance.Status.Conditions.MarkTrue(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage) + } else if *instance.Spec.Replicas > 0 { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyRunningMessage)) + + } else { + instance.Status.Conditions.MarkFalse( + condition.DeploymentReadyCondition, + condition.NotRequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyInitMessage) + } + // create StatefulSet - end + + Log.Info(fmt.Sprintf("Reconciled Service '%s' successfully", instance.Name)) + // update the overall status condition if service is ready + if instance.IsReady() { + instance.Status.Conditions.MarkTrue(condition.ReadyCondition, condition.ReadyMessage) + } + // For non ready we'll let the main defer func handle the status update using the Mirror function + return ctrl.Result{}, nil +} + +// generateServiceConfigs - create Secret which holds the service configuration +func (r *CloudKittyProcReconciler) generateServiceConfigs( + ctx context.Context, + h *helper.Helper, + instance *telemetryv1.CloudKittyProc, + envVars *map[string]env.Setter, + serviceLabels map[string]string, +) error { + // + // create custom Secret for cloudkitty service-specific config input + // - %-config-data holds custom config for the service + // + + labels := 
labels.GetLabels(instance, labels.GetGroupLabel(cloudkitty.ServiceName), serviceLabels) + + // customData hold any customization for the service. + customData := map[string]string{cloudkitty.CustomServiceConfigFileName: instance.Spec.CustomServiceConfig} + + // Fetch the two service config snippets (DefaultsConfigFileName and + // CustomConfigFileName) from the Secret generated by the top level + // cloudkitty controller, and add them to this service specific Secret. + cloudkittySecretName := cloudkitty.GetOwningCloudKittyName(instance) + "-config-data" + cloudkittySecret, _, err := secret.GetSecret(ctx, h, cloudkittySecretName, instance.Namespace) + if err != nil { + return err + } + customData[cloudkitty.DefaultsConfigFileName] = string(cloudkittySecret.Data[cloudkitty.DefaultsConfigFileName]) + customData[cloudkitty.CustomConfigFileName] = string(cloudkittySecret.Data[cloudkitty.CustomConfigFileName]) + + customSecrets := "" + for _, secretName := range instance.Spec.CustomServiceConfigSecrets { + secret, _, err := secret.GetSecret(ctx, h, secretName, instance.Namespace) + if err != nil { + return err + } + for _, data := range secret.Data { + customSecrets += string(data) + "\n" + } + } + customData[cloudkitty.CustomServiceConfigSecretsFileName] = customSecrets + + configTemplates := []util.Template{ + { + Name: fmt.Sprintf("%s-config-data", instance.Name), + Namespace: instance.Namespace, + Type: util.TemplateTypeConfig, + InstanceType: instance.Kind, + AdditionalTemplate: map[string]string{ + "healthcheck.py": "/cloudkitty/bin/healthcheck.py", + }, + CustomData: customData, + Labels: labels, + }, + } + + return secret.EnsureSecrets(ctx, h, instance, configTemplates, envVars) +} + +// createHashOfInputHashes - creates a hash of hashes which gets added to the resources which requires a restart +// if any of the input resources change, like configs, passwords, ... 
+// +// returns the hash, whether the hash changed (as a bool) and any error +func (r *CloudKittyProcReconciler) createHashOfInputHashes( + ctx context.Context, + instance *telemetryv1.CloudKittyProc, + envVars map[string]env.Setter, +) (string, bool, error) { + Log := r.GetLogger(ctx) + var hashMap map[string]string + changed := false + mergedMapVars := env.MergeEnvs([]corev1.EnvVar{}, envVars) + hash, err := util.ObjectHash(mergedMapVars) + if err != nil { + return hash, changed, err + } + if hashMap, changed = util.SetHash(instance.Status.Hash, common.InputHashName, hash); changed { + instance.Status.Hash = hashMap + Log.Info(fmt.Sprintf("Input maps hash %s - %s", common.InputHashName, hash)) + } + return hash, changed, nil +} diff --git a/controllers/metricstorage_controller.go b/controllers/metricstorage_controller.go index 2aaa2353..379c3975 100644 --- a/controllers/metricstorage_controller.go +++ b/controllers/metricstorage_controller.go @@ -23,7 +23,6 @@ import ( "net" "reflect" "regexp" - "slices" "strconv" "strings" "time" @@ -32,25 +31,16 @@ import ( discoveryv1 "k8s.io/api/discovery/v1" k8s_errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" - - "k8s.io/apimachinery/pkg/api/meta" logr "github.com/go-logr/logr" "github.com/openstack-k8s-operators/lib-common/modules/ansible" @@ -95,15 +85,7 @@ var ( ) // MetricStorageReconciler reconciles a MetricStorage object -type MetricStorageReconciler struct { - client.Client - Kclient kubernetes.Interface - Scheme *runtime.Scheme - Controller controller.Controller - Watching []string - RESTMapper meta.RESTMapper - Cache cache.Cache -} +type MetricStorageReconciler utils.ConditionalWatchingReconciler // ConnectionInfo holds information about connection to a compute node type ConnectionInfo struct { @@ -321,7 +303,11 @@ func (r *MetricStorageReconciler) reconcileNormal( // Deploy monitoring stack - err := r.ensureWatches(ctx, "monitoringstacks.monitoring.rhobs", &obov1.MonitoringStack{}, eventHandler) + err := utils.EnsureWatches( + ctx, (*utils.ConditionalWatchingReconciler)(r), + "monitoringstacks.monitoring.rhobs", + &obov1.MonitoringStack{}, eventHandler, helper, + ) if err != nil { instance.Status.Conditions.MarkFalse(telemetryv1.MonitoringStackReadyCondition, condition.Reason("Can't own MonitoringStack resource. 
The Cluster Observability Operator probably isn't installed"), @@ -369,7 +355,13 @@ func (r *MetricStorageReconciler) reconcileNormal( } return []reconcile.Request{{NamespacedName: name}} } - err = r.ensureWatches(ctx, "prometheuses.monitoring.rhobs", &monv1.Prometheus{}, handler.EnqueueRequestsFromMapFunc(prometheusWatchFn)) + err = utils.EnsureWatches( + ctx, (*utils.ConditionalWatchingReconciler)(r), + "prometheuses.monitoring.rhobs", + &monv1.Prometheus{}, + handler.EnqueueRequestsFromMapFunc(prometheusWatchFn), + helper, + ) if err != nil { instance.Status.Conditions.MarkFalse(telemetryv1.PrometheusReadyCondition, condition.Reason("Can't watch prometheus resource. The Cluster Observability Operator probably isn't installed"), @@ -583,7 +575,13 @@ func (r *MetricStorageReconciler) reconcileNormal( } return []reconcile.Request{{NamespacedName: name}} } - err = r.ensureWatches(ctx, "prometheuses.monitoring.rhobs", &monv1.Prometheus{}, handler.EnqueueRequestsFromMapFunc(prometheusWatchFn)) + err = utils.EnsureWatches( + ctx, (*utils.ConditionalWatchingReconciler)(r), + "prometheuses.monitoring.rhobs", + &monv1.Prometheus{}, + handler.EnqueueRequestsFromMapFunc(prometheusWatchFn), + helper, + ) if err != nil { instance.Status.Conditions.MarkFalse(telemetryv1.PrometheusReadyCondition, condition.Reason("Can't watch prometheus resource. The Cluster Observability Operator probably isn't installed"), @@ -718,7 +716,11 @@ func (r *MetricStorageReconciler) createScrapeConfigs( helper *helper.Helper, ) (ctrl.Result, error) { Log := r.GetLogger(ctx) - err := r.ensureWatches(ctx, "scrapeconfigs.monitoring.rhobs", &monv1alpha1.ScrapeConfig{}, eventHandler) + err := utils.EnsureWatches( + ctx, (*utils.ConditionalWatchingReconciler)(r), + "scrapeconfigs.monitoring.rhobs", + &monv1alpha1.ScrapeConfig{}, eventHandler, helper, + ) if err != nil { instance.Status.Conditions.MarkFalse(telemetryv1.ScrapeConfigReadyCondition, condition.Reason("Can't own ScrapeConfig resource. The Cluster Observability Operator probably isn't installed"), @@ -1131,7 +1133,11 @@ func (r *MetricStorageReconciler) createDashboardObjects(ctx context.Context, in } // Deploy PrometheusRule for dashboards - err = r.ensureWatches(ctx, "prometheusrules.monitoring.rhobs", &monv1.PrometheusRule{}, eventHandler) + err = utils.EnsureWatches( + ctx, (*utils.ConditionalWatchingReconciler)(r), + "prometheusrules.monitoring.rhobs", + &monv1.PrometheusRule{}, eventHandler, helper, + ) if err != nil { instance.Status.Conditions.MarkFalse(telemetryv1.DashboardPrometheusRuleReadyCondition, condition.Reason("Can't own PrometheusRule resource. 
The Cluster Observability Operator probably isn't installed"),
@@ -1251,39 +1257,6 @@ func (r *MetricStorageReconciler) createDashboardObjects(ctx context.Context, in
 	return ctrl.Result{}, err
 }
 
-func (r *MetricStorageReconciler) ensureWatches(
-	ctx context.Context,
-	name string,
-	kind client.Object,
-	handler handler.EventHandler,
-) error {
-	Log := r.GetLogger(ctx)
-	if slices.Contains(r.Watching, name) {
-		// We are already watching the resource
-		return nil
-	}
-	u := &unstructured.Unstructured{}
-	u.SetGroupVersionKind(schema.GroupVersionKind{
-		Group:   "apiextensions.k8s.io",
-		Kind:    "CustomResourceDefinition",
-		Version: "v1",
-	})
-
-	err := r.Get(context.Background(), client.ObjectKey{
-		Name: name,
-	}, u)
-	if err != nil {
-		return err
-	}
-
-	Log.Info(fmt.Sprintf("Starting to watch %s", name))
-	err = r.Controller.Watch(source.Kind(r.Cache, kind, handler))
-	if err == nil {
-		r.Watching = append(r.Watching, name)
-	}
-	return err
-}
-
 func getComputeNodesConnectionInfo(
 	instance *telemetryv1.MetricStorage,
 	helper *helper.Helper,
diff --git a/controllers/telemetry_controller.go b/controllers/telemetry_controller.go
index a4cb1ce5..03091b95 100644
--- a/controllers/telemetry_controller.go
+++ b/controllers/telemetry_controller.go
@@ -66,6 +66,9 @@ func (r *TelemetryReconciler) GetLogger(ctx context.Context) logr.Logger {
 // +kubebuilder:rbac:groups=telemetry.openstack.org,resources=loggings,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=telemetry.openstack.org,resources=loggings/status,verbs=get;update;patch
 // +kubebuilder:rbac:groups=telemetry.openstack.org,resources=loggings/finalizers,verbs=update;delete;patch
+// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkitties,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkitties/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=telemetry.openstack.org,resources=cloudkitties/finalizers,verbs=update;delete;patch
 // +kubebuilder:rbac:groups=rabbitmq.openstack.org,resources=transporturls,verbs=get;list;watch;create;update;patch;delete
 
 // Reconcile reconciles a Telemetry
@@ -141,6 +144,7 @@ func (r *TelemetryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 		condition.UnknownCondition(telemetryv1.AutoscalingReadyCondition, condition.InitReason, telemetryv1.AutoscalingReadyInitMessage),
 		condition.UnknownCondition(telemetryv1.MetricStorageReadyCondition, condition.InitReason, telemetryv1.MetricStorageReadyInitMessage),
 		condition.UnknownCondition(telemetryv1.LoggingReadyCondition, condition.InitReason, telemetryv1.LoggingReadyInitMessage),
+		condition.UnknownCondition(telemetryv1.CloudKittyReadyCondition, condition.InitReason, telemetryv1.CloudKittyReadyInitMessage),
 	)
 
 	instance.Status.Conditions.Init(&cl)
@@ -225,6 +229,13 @@ func (r *TelemetryReconciler) reconcileNormal(ctx context.Context, instance *tel
 		return ctrlResult, nil
 	}
 
+	ctrlResult, err = r.reconcileCloudKitty(ctx, instance, helper)
+	if err != nil {
+		return ctrl.Result{}, err
+	} else if (ctrlResult != ctrl.Result{}) {
+		return ctrlResult, nil
+	}
+
 	// We reached the end of the Reconcile, update the Ready condition based on
 	// the sub conditions
 	if instance.Status.Conditions.AllSubConditionIsTrue() {
@@ -553,6 +564,89 @@ func (r TelemetryReconciler) reconcileLogging(ctx context.Context, instance *tel
 	return ctrl.Result{}, nil
 }
 
+// reconcileCloudKitty - creates, patches, or deletes the CloudKitty CR owned by
+// this Telemetry instance and mirrors its Ready condition into CloudKittyReadyCondition.
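+// CloudKitty is only deployed when spec.cloudKitty.enabled is true; when it is unset
+// or false, any existing CloudKitty CR is deleted again and the mirrored condition is
+// removed from the Telemetry status. As a rough, illustrative sketch of the relevant
+// Telemetry spec section (the YAML keys are assumed from the Go field names
+// Enabled/NodeSelector/TopologyRef and may differ from the generated json tags):
+//
+//	spec:
+//	  cloudKitty:
+//	    enabled: true
+//	    # nodeSelector and topologyRef fall back to the top-level Telemetry
+//	    # values when left unset, see the defaulting below.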
+func (r TelemetryReconciler) reconcileCloudKitty(ctx context.Context, instance *telemetryv1.Telemetry, helper *helper.Helper) (ctrl.Result, error) { + const ( + cloudKittyNamespaceLabel = "CloudKitty.Namespace" + cloudKittyNameLabel = "CloudKitty.Name" + cloudKittyName = "cloudkitty" + ) + cloudKittyInstance := &telemetryv1.CloudKitty{ + ObjectMeta: metav1.ObjectMeta{ + Name: cloudKittyName, + Namespace: instance.Namespace, + }, + } + + if instance.Spec.CloudKitty.Enabled == nil || !*instance.Spec.CloudKitty.Enabled { + if res, err := utils.EnsureDeleted(ctx, helper, cloudKittyInstance); err != nil { + return res, err + } + instance.Status.Conditions.Remove(telemetryv1.CloudKittyReadyCondition) + return ctrl.Result{}, nil + } + + if instance.Spec.CloudKitty.NodeSelector == nil { + instance.Spec.CloudKitty.NodeSelector = instance.Spec.NodeSelector + } + + if instance.Spec.CloudKitty.TopologyRef == nil { + instance.Spec.CloudKitty.TopologyRef = instance.Spec.TopologyRef + } + + helper.GetLogger().Info("Reconciling CloudKitty", cloudKittyNamespaceLabel, instance.Namespace, cloudKittyNameLabel, cloudKittyName) + op, err := controllerutil.CreateOrPatch(ctx, helper.GetClient(), cloudKittyInstance, func() error { + instance.Spec.CloudKitty.CloudKittySpec.DeepCopyInto(&cloudKittyInstance.Spec) + + err := controllerutil.SetControllerReference(helper.GetBeforeObject(), cloudKittyInstance, helper.GetScheme()) + if err != nil { + return err + } + return nil + }) + + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + telemetryv1.CloudKittyReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + // Check the observed Generation and mirror the condition from the + // underlying resource reconciliation + autoObsGen, err := r.checkCloudKittyGeneration(instance) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + telemetryv1.CloudKittyReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + telemetryv1.CloudKittyReadyErrorMessage, + err.Error())) + return ctrl.Result{}, nil + } + if !autoObsGen { + instance.Status.Conditions.Set(condition.UnknownCondition( + telemetryv1.CloudKittyReadyCondition, + condition.InitReason, + telemetryv1.AutoscalingReadyRunningMessage, + )) + } else { + // Mirror Cloudkitty condition status + c := cloudKittyInstance.Status.Conditions.Mirror(telemetryv1.CloudKittyReadyCondition) + if c != nil { + instance.Status.Conditions.Set(c) + } + } + if op != controllerutil.OperationResultNone && autoObsGen { + helper.GetLogger().Info(fmt.Sprintf("%s %s - %s", cloudKittyName, cloudKittyInstance.Name, op)) + } + + return ctrl.Result{}, nil +} + // SetupWithManager sets up the controller with the Manager. func (r *TelemetryReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). @@ -561,6 +655,7 @@ func (r *TelemetryReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&telemetryv1.Autoscaling{}). Owns(&telemetryv1.MetricStorage{}). Owns(&telemetryv1.Logging{}). + Owns(&telemetryv1.CloudKitty{}). 
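+		// Owning the CloudKitty CR means create/update/delete events on it (including
+		// status updates) enqueue the parent Telemetry object, so the mirrored
+		// CloudKittyReadyCondition is refreshed without explicit polling.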
Complete(r) } @@ -647,3 +742,24 @@ func (r *TelemetryReconciler) checkLoggingGeneration( } return true, nil } + +// checkCloudKittyGeneration - +func (r *TelemetryReconciler) checkCloudKittyGeneration( + instance *telemetryv1.Telemetry, +) (bool, error) { + Log := r.GetLogger(context.Background()) + clm := &telemetryv1.CloudKittyList{} + listOpts := []client.ListOption{ + client.InNamespace(instance.Namespace), + } + if err := r.List(context.Background(), clm, listOpts...); err != nil { + Log.Error(err, "Unable to retrieve CloudKitty CR %w") + return false, err + } + for _, item := range clm.Items { + if item.Generation != item.Status.ObservedGeneration { + return false, nil + } + } + return true, nil +} diff --git a/go.mod b/go.mod index 23b98454..0a97c2f0 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,9 @@ go 1.24.4 replace github.com/openstack-k8s-operators/telemetry-operator/api => ./api require ( + github.com/cert-manager/cert-manager v1.16.5 github.com/go-logr/logr v1.4.3 + github.com/grafana/loki/operator/api/loki v0.0.0-20250910094332-a082b8a061ba github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.7 github.com/onsi/ginkgo/v2 v2.27.1 github.com/onsi/gomega v1.38.2 @@ -13,6 +15,7 @@ require ( github.com/openstack-k8s-operators/infra-operator/apis v0.6.1-0.20251002120642-c2d58c6fc03e github.com/openstack-k8s-operators/keystone-operator/api v0.6.1-0.20251027074845-ed8154b20ad1 github.com/openstack-k8s-operators/lib-common/modules/ansible v0.6.1-0.20250929092825-4c2402451077 + github.com/openstack-k8s-operators/lib-common/modules/certmanager v0.6.0 github.com/openstack-k8s-operators/lib-common/modules/common v0.6.1-0.20250929092825-4c2402451077 github.com/openstack-k8s-operators/mariadb-operator/api v0.6.1-0.20251002102126-84fdf59cb2fb github.com/openstack-k8s-operators/ovn-operator/api v0.6.1-0.20251002145853-52dcb63c343b @@ -90,6 +93,7 @@ require ( k8s.io/apiextensions-apiserver v0.33.2 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250902184714-7fc278399c7f // indirect + sigs.k8s.io/gateway-api v1.2.1 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect diff --git a/go.sum b/go.sum index 1cd50003..0d172c23 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,8 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cert-manager/cert-manager v1.16.5 h1:XIhKoS4zQV9RHXAkqQW0NLivvoxAnWzbPsy9BG6cPVc= +github.com/cert-manager/cert-manager v1.16.5/go.mod h1:0DwmIGjMOreiP7/6gAqnjaBRJ+yHCfZ5DP7NNqKV+tY= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -58,6 +60,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gophercloud/gophercloud/v2 v2.8.0 h1:of2+8tT6+FbEYHfYC8GBu8TXJNsXYSNm9KuvpX7Neqo= github.com/gophercloud/gophercloud/v2 v2.8.0/go.mod h1:Ki/ILhYZr/5EPebrPL9Ej+tUg4lqx71/YH2JWVeU+Qk= 
+github.com/grafana/loki/operator/api/loki v0.0.0-20250910094332-a082b8a061ba h1:P5Wgp2HfGfNPLCPpS+YqquKdrrl4tW0El7VX23D6vtg= +github.com/grafana/loki/operator/api/loki v0.0.0-20250910094332-a082b8a061ba/go.mod h1:OBAgJh0mLYRvziBzBKr4/anrPHqGY9qEfuNXCpnUNi0= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -106,6 +110,8 @@ github.com/openstack-k8s-operators/keystone-operator/api v0.6.1-0.20251027074845 github.com/openstack-k8s-operators/keystone-operator/api v0.6.1-0.20251027074845-ed8154b20ad1/go.mod h1:FMFoO4MjEQ85JpdLtDHxYSZxvJ9KzHua+HdKhpl0KRI= github.com/openstack-k8s-operators/lib-common/modules/ansible v0.6.1-0.20250929092825-4c2402451077 h1:wAonK5ng4dZdQPdBGnLRLQ0zYu5cQ0OmDO46iiN+Quw= github.com/openstack-k8s-operators/lib-common/modules/ansible v0.6.1-0.20250929092825-4c2402451077/go.mod h1:/t8UOevAIOdAu7SAkfwfyZj6p2pkuupl3mZJPMNqNOo= +github.com/openstack-k8s-operators/lib-common/modules/certmanager v0.6.0 h1:cFOyP37qQ9T1D6mVTCwuPGt86LB4sTErpHT+L1e+VKY= +github.com/openstack-k8s-operators/lib-common/modules/certmanager v0.6.0/go.mod h1:jgfvFeljXxot0LODLYCmjESxoMXbClXcBcf0DaX4zA0= github.com/openstack-k8s-operators/lib-common/modules/common v0.6.1-0.20250929092825-4c2402451077 h1:missBxDwEfOdkHVKd6zyCyaQjSObw9Ge1O4A7WU5EuM= github.com/openstack-k8s-operators/lib-common/modules/common v0.6.1-0.20250929092825-4c2402451077/go.mod h1:CjsYQ/dUr4eUmBEvM3UFUxvYvl2bAhGfGflaD+N4fWA= github.com/openstack-k8s-operators/lib-common/modules/openstack v0.6.1-0.20251021145236-2b84ec9fd9bb h1:wToXqX7AS1JV3Kna7RcJfkRart8rSGun2biKNfyY6Zg= @@ -245,6 +251,8 @@ k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPG k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.19.7 h1:DLABZfMr20A+AwCZOHhcbcu+TqBXnJZaVBri9K3EO48= sigs.k8s.io/controller-runtime v0.19.7/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM= +sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= diff --git a/main.go b/main.go index e37f1ac7..4bf8af36 100644 --- a/main.go +++ b/main.go @@ -29,6 +29,7 @@ import ( "k8s.io/client-go/kubernetes" _ "k8s.io/client-go/plugin/pkg/client/auth" + certmgrv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" @@ -40,6 +41,7 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + lokistackv1 "github.com/grafana/loki/operator/api/loki/v1" networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" heatv1 "github.com/openstack-k8s-operators/heat-operator/api/v1beta1" memcachedv1 "github.com/openstack-k8s-operators/infra-operator/apis/memcached/v1beta1" @@ -84,6 +86,8 @@ func init() { utilruntime.Must(rabbitmqclusterv1.AddToScheme(scheme)) 
utilruntime.Must(networkv1.AddToScheme(scheme)) utilruntime.Must(topologyv1.AddToScheme(scheme)) + utilruntime.Must(lokistackv1.AddToScheme(scheme)) + utilruntime.Must(certmgrv1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -203,10 +207,40 @@ func main() { os.Exit(1) } + if err = (&controllers.CloudKittyReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Kclient: kclient, + RESTMapper: mgr.GetRESTMapper(), + Cache: mgr.GetCache(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create CloudKitty controller") + os.Exit(1) + } + + if err = (&controllers.CloudKittyAPIReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Kclient: kclient, + }).SetupWithManager(context.Background(), mgr); err != nil { + setupLog.Error(err, "unable to create CloudKitty API controller") + os.Exit(1) + } + + if err = (&controllers.CloudKittyProcReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + Kclient: kclient, + }).SetupWithManager(context.Background(), mgr); err != nil { + setupLog.Error(err, "unable to create CloudKitty Processor controller") + os.Exit(1) + } + // Acquire environmental defaults and initialize defaults with them telemetryv1beta1.SetupDefaultsTelemetry() telemetryv1beta1.SetupDefaultsCeilometer() telemetryv1beta1.SetupDefaultsAutoscaling() + telemetryv1beta1.SetupDefaultsCloudKitty() // Setup webhooks if requested checker := healthz.Ping @@ -228,6 +262,10 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", "MetricStorage") os.Exit(1) } + if err = (&telemetryv1beta1.CloudKitty{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "CloudKitty") + os.Exit(1) + } checker = mgr.GetWebhookServer().StartedChecker() } diff --git a/pkg/cloudkitty/cert.go b/pkg/cloudkitty/cert.go new file mode 100644 index 00000000..4a72e4cb --- /dev/null +++ b/pkg/cloudkitty/cert.go @@ -0,0 +1,67 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cloudkitty + +import ( + "fmt" + + certmgrv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1" + certmgrmetav1 "github.com/cert-manager/cert-manager/pkg/apis/meta/v1" + "github.com/openstack-k8s-operators/lib-common/modules/certmanager" + telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1" +) + +// Certificate defines a client certificate for communication between cloudkitty and loki +func Certificate( + instance *telemetryv1.CloudKitty, + labels map[string]string, + issuer *certmgrv1.Issuer, +) *certmgrv1.Certificate { + cert := certmanager.Cert( + ClientCertSecretName, + instance.Namespace, + labels, + certmgrv1.CertificateSpec{ + CommonName: fmt.Sprintf("%s.%s.svc", instance.Name, instance.Namespace), + DNSNames: []string{ + fmt.Sprintf("%s.%s.svc", instance.Name, instance.Namespace), + fmt.Sprintf("%s.%s.svc.cluster.local", instance.Name, instance.Namespace), + }, + SecretName: ClientCertSecretName, + Subject: &certmgrv1.X509Subject{ + OrganizationalUnits: []string{ + instance.Name, + }, + }, + PrivateKey: &certmgrv1.CertificatePrivateKey{ + Algorithm: "RSA", + Size: 3072, + }, + Usages: []certmgrv1.KeyUsage{ + certmgrv1.UsageDigitalSignature, + certmgrv1.UsageKeyEncipherment, + certmgrv1.UsageClientAuth, + }, + IssuerRef: certmgrmetav1.ObjectReference{ + Name: issuer.Name, + Kind: issuer.Kind, + Group: issuer.GroupVersionKind().Group, + }, + }, + ) + return cert +} diff --git a/pkg/cloudkitty/common.go b/pkg/cloudkitty/common.go new file mode 100644 index 00000000..9c78b72b --- /dev/null +++ b/pkg/cloudkitty/common.go @@ -0,0 +1,161 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cloudkitty + +import ( + "context" + "fmt" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + "k8s.io/apimachinery/pkg/types" + "time" + + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +type conditionUpdater interface { + Set(c *condition.Condition) + MarkTrue(t condition.Type, messageFormat string, messageArgs ...interface{}) +} + +type topologyHandler interface { + GetSpecTopologyRef() *topologyv1.TopoRef + GetLastAppliedTopology() *topologyv1.TopoRef + SetLastAppliedTopology(t *topologyv1.TopoRef) +} + +// EnsureTopology - +func EnsureTopology( + ctx context.Context, + helper *helper.Helper, + instance topologyHandler, + finalizer string, + conditionUpdater conditionUpdater, + defaultLabelSelector metav1.LabelSelector, +) (*topologyv1.Topology, error) { + + topology, err := topologyv1.EnsureServiceTopology( + ctx, + helper, + instance.GetSpecTopologyRef(), + instance.GetLastAppliedTopology(), + finalizer, + defaultLabelSelector, + ) + if err != nil { + conditionUpdater.Set(condition.FalseCondition( + condition.TopologyReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TopologyReadyErrorMessage, + err.Error())) + return nil, fmt.Errorf("waiting for Topology requirements: %w", err) + } + // update the Status with the last retrieved Topology (or set it to nil) + instance.SetLastAppliedTopology(instance.GetSpecTopologyRef()) + // update the Topology condition only when a Topology is referenced and has + // been retrieved (err == nil) + if tr := instance.GetSpecTopologyRef(); tr != nil { + // update the TopologyRef associated condition + conditionUpdater.MarkTrue( + condition.TopologyReadyCondition, + condition.TopologyReadyMessage, + ) + } + return topology, nil +} + +// VerifyServiceSecret - ensures that the Secret object exists and the expected +// fields are in the Secret. It also sets a hash of the values of the expected +// fields passed as input. 
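+//
+// A hedged usage sketch from one of the CloudKitty service controllers (the
+// surrounding variables such as configVars are assumptions; the signature is the
+// one defined below):
+//
+//	ctrlResult, err := cloudkitty.VerifyServiceSecret(
+//		ctx,
+//		types.NamespacedName{Namespace: instance.Namespace, Name: instance.Spec.Secret},
+//		[]string{instance.Spec.PasswordSelectors.CloudKittyService},
+//		helper.GetClient(),
+//		&instance.Status.Conditions,
+//		cloudkitty.NormalDuration,
+//		&configVars,
+//	)
+//
+// On success the hash of the requested fields is stored in configVars, so config
+// regeneration (and with it a pod restart) is triggered when the password changes.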
+func VerifyServiceSecret( + ctx context.Context, + secretName types.NamespacedName, + expectedFields []string, + reader client.Reader, + conditionUpdater conditionUpdater, + requeueTimeout time.Duration, + envVars *map[string]env.Setter, +) (ctrl.Result, error) { + + hash, res, err := secret.VerifySecret(ctx, secretName, expectedFields, reader, requeueTimeout) + if err != nil { + conditionUpdater.Set(condition.FalseCondition( + condition.InputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.InputReadyErrorMessage, + err.Error())) + return res, err + } else if (res != ctrl.Result{}) { + log.FromContext(ctx).Info(fmt.Sprintf("OpenStack secret %s not found", secretName)) + conditionUpdater.Set(condition.FalseCondition( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.InputReadyWaitingMessage)) + return res, nil + } + (*envVars)[secretName.Name] = env.SetValue(hash) + return ctrl.Result{}, nil +} + +// VerifyConfigSecrets - It iterates over the secretNames passed as input and +// sets the hash of values in the envVars map. +func VerifyConfigSecrets( + ctx context.Context, + h *helper.Helper, + conditionUpdater conditionUpdater, + secretNames []string, + namespace string, + envVars *map[string]env.Setter, +) (ctrl.Result, error) { + var hash string + var err error + for _, secretName := range secretNames { + _, hash, err = secret.GetSecret(ctx, h, secretName, namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + log.FromContext(ctx).Info(fmt.Sprintf("Secret %s not found", secretName)) + conditionUpdater.Set(condition.FalseCondition( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.InputReadyWaitingMessage)) + return ResultRequeue, nil + } + conditionUpdater.Set(condition.FalseCondition( + condition.InputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.InputReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + // Add a prefix to the var name to avoid accidental collision with other non-secret + // vars. The secret names themselves will be unique. + (*envVars)["secret-"+secretName] = env.SetValue(hash) + } + + return ctrl.Result{}, nil +} diff --git a/pkg/cloudkitty/const.go b/pkg/cloudkitty/const.go new file mode 100644 index 00000000..7a862dd1 --- /dev/null +++ b/pkg/cloudkitty/const.go @@ -0,0 +1,71 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cloudkitty + +import ( + "time" + + ctrl "sigs.k8s.io/controller-runtime" +) + +const ( + // ServiceName - + ServiceName = "cloudkitty" + // ServiceType - + ServiceType = "rating" + // DatabaseName - + DatabaseName = "cloudkitty" + + // DefaultsConfigFileName - + DefaultsConfigFileName = "cloudkitty.conf" + // ServiceConfigFileName - + ServiceConfigFileName = "01-service-defaults.conf" + // CustomConfigFileName - + CustomConfigFileName = "02-global-custom.conf" + // CustomServiceConfigFileName - + CustomServiceConfigFileName = "03-service-custom.conf" + // CustomServiceConfigSecretsFileName - + CustomServiceConfigSecretsFileName = "04-service-custom-secrets.conf" + // MyCnfFileName - + MyCnfFileName = "my.cnf" + + // CloudKittyPublicPort - + CloudKittyPublicPort int32 = 8889 + // CloudKittyInternalPort - + CloudKittyInternalPort int32 = 8889 + + // ShortDuration is the duration for short requeues + ShortDuration = time.Duration(5) * time.Second + // NormalDuration is the duration for normal requeues + NormalDuration = time.Duration(10) * time.Second + + // PrometheusEndpointSecret - The name of the secret that contains the Prometheus endpoint configuration. + PrometheusEndpointSecret = "metric-storage-prometheus-endpoint" + + // ClientCertSecretName is the name of the client certificate secret + ClientCertSecretName = "cert-cloudkitty-client-internal" + + // CaConfigmapName is the name of the CA configmap + CaConfigmapName = "lokistack-ca" + // CaConfigmapKey is the key in the CA configmap + CaConfigmapKey = "ca.crt" + + // CloudKittyUserID - + CloudKittyUserID = 42406 +) + +// ResultRequeue is a ctrl.Result that requeues after NormalDuration +var ResultRequeue = ctrl.Result{RequeueAfter: NormalDuration} diff --git a/pkg/cloudkitty/dbsync.go b/pkg/cloudkitty/dbsync.go new file mode 100644 index 00000000..f70bd7fc --- /dev/null +++ b/pkg/cloudkitty/dbsync.go @@ -0,0 +1,114 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cloudkitty provides CloudKitty service configuration and management utilities +package cloudkitty + +import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +const ( + // DBSyncCommand - + // TODO: Once we work on update/upgrades revisit the command in the + // the cloudkitty-dbsync-config.json file. + // If we stop all services during the update/upgrade then we can keep + // the --bump-versions flag. + // If we are doing rolling upgrades we'll need to use the flag + // conditionally (only for adoption) and do the restart cycle of + // services as described in the upstream rolling upgrades process. 
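+	// Note: the job below only invokes the kolla_start wrapper with
+	// KOLLA_BOOTSTRAP=TRUE; the actual db sync command is expected to come from the
+	// cloudkitty-dbsync-config.json mentioned above rather than from this constant.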
+ dbSyncCommand = "/usr/local/bin/kolla_start" +) + +// DbSyncJob func +func DbSyncJob(instance *telemetryv1.CloudKitty, labels map[string]string, annotations map[string]string) *batchv1.Job { + args := []string{"-c"} + args = append(args, dbSyncCommand) + + // create Volume and VolumeMounts + volumes := GetVolumes(instance.Name) + volumeMounts := GetVolumeMounts("cloudkitty-dbsync") + // add CA cert if defined + if instance.Spec.CloudKittyAPI.TLS.CaBundleSecretName != "" { + volumes = append(volumes, instance.Spec.CloudKittyAPI.TLS.CreateVolume()) + volumeMounts = append(volumeMounts, instance.Spec.CloudKittyAPI.TLS.CreateVolumeMounts(nil)...) + } + + runAsUser := int64(CloudKittyUserID) + envVars := map[string]env.Setter{} + envVars["KOLLA_CONFIG_STRATEGY"] = env.SetValue("COPY_ALWAYS") + envVars["KOLLA_BOOTSTRAP"] = env.SetValue("TRUE") + cloudKittyPassword := []corev1.EnvVar{ + { + Name: "CloudKittyPassword", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instance.Spec.Secret, + }, + Key: instance.Spec.PasswordSelectors.CloudKittyService, + }, + }, + }, + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: ServiceName + "-db-sync", + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: instance.RbacResourceName(), + Containers: []corev1.Container{ + { + Name: ServiceName + "-db-sync", + Command: []string{ + "/bin/bash", + }, + Args: args, + Image: instance.Spec.CloudKittyAPI.ContainerImage, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &runAsUser, + RunAsNonRoot: ptr.To(true), + }, + Env: env.MergeEnvs(cloudKittyPassword, envVars), + VolumeMounts: volumeMounts, + }, + }, + Volumes: volumes, + }, + }, + }, + } + + if instance.Spec.NodeSelector != nil { + job.Spec.Template.Spec.NodeSelector = *instance.Spec.NodeSelector + } + + return job +} diff --git a/pkg/cloudkitty/funcs.go b/pkg/cloudkitty/funcs.go new file mode 100644 index 00000000..43210dad --- /dev/null +++ b/pkg/cloudkitty/funcs.go @@ -0,0 +1,49 @@ +package cloudkitty + +import ( + common "github.com/openstack-k8s-operators/lib-common/modules/common" + "github.com/openstack-k8s-operators/lib-common/modules/common/affinity" + + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// GetOwningCloudKittyName - Given a CloudKittyAPI or CloudKittyProc +// object, returning the parent CloudKitty object that created it (if any) +func GetOwningCloudKittyName(instance client.Object) string { + for _, ownerRef := range instance.GetOwnerReferences() { + if ownerRef.Kind == "CloudKitty" { + return ownerRef.Name + } + } + + return "" +} + +// GetNetworkAttachmentAddrs - Returns a list of IP addresses for all network attachments. +func GetNetworkAttachmentAddrs(namespace string, networkAttachments []string, networkAttachmentStatus map[string][]string) []string { + networkAttachmentAddrs := []string{} + + for _, network := range networkAttachments { + networkName := namespace + "/" + network + if networkAddrs, ok := networkAttachmentStatus[networkName]; ok { + networkAttachmentAddrs = append(networkAttachmentAddrs, networkAddrs...) + } + } + + return networkAttachmentAddrs +} + +// GetPodAffinity - Returns a corev1.Affinity reference for the specified component. 
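+// The returned affinity is a best-effort (preferred, not required) pod anti-affinity
+// keyed on kubernetes.io/hostname, built via lib-common's affinity.DistributePods from
+// the component label, so the scheduler may still co-locate replicas when no other
+// node fits (assumption based on the DistributePods helper and the comment below).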
+func GetPodAffinity(componentName string) *corev1.Affinity { + // If possible two pods of the same component (e.g cloudkitty-api) should not + // run on the same worker node. If this is not possible they get still + // created on the same worker node. + return affinity.DistributePods( + common.ComponentSelector, + []string{ + componentName, + }, + corev1.LabelHostname, + ) +} diff --git a/pkg/cloudkitty/lokistack.go b/pkg/cloudkitty/lokistack.go new file mode 100644 index 00000000..792d9ba9 --- /dev/null +++ b/pkg/cloudkitty/lokistack.go @@ -0,0 +1,212 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cloudkitty + +import ( + "errors" + "fmt" + "slices" + + lokistackv1 "github.com/grafana/loki/operator/api/loki/v1" + telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +var ( + // ErrEffectiveDateRequired is returned when schema effectiveDate is missing + ErrEffectiveDateRequired = errors.New("invalid CloudKitty spec. Field .spec.s3StorageConfig.schema.effectiveDate is required") + // ErrSchemaVersionRequired is returned when schema version is missing + ErrSchemaVersionRequired = errors.New("invalid CloudKitty spec. Field .spec.s3StorageConfig.schema.version is required") + // ErrSecretNameRequired is returned when secret name is missing + ErrSecretNameRequired = errors.New("invalid CloudKitty spec. Field .spec.s3StorageConfig.secret.name is required") + // ErrSecretTypeRequired is returned when secret type is missing + ErrSecretTypeRequired = errors.New("invalid CloudKitty spec. Field .spec.s3StorageConfig.secret.type is required") + // ErrInvalidSecretType is returned when secret type is not valid + ErrInvalidSecretType = errors.New("invalid CloudKitty spec. Field .spec.s3StorageConfig.secret.type needs to be one of: azure, gcs, s3, swift, alibabacloud") + // ErrCANameRequired is returned when TLS CA name is missing + ErrCANameRequired = errors.New("invalid CloudKitty spec. Field .spec.s3StorageConfig.tls.caName is required") +) + +func validateObjectStorageSpec(spec telemetryv1.ObjectStorageSpec) error { + for _, schema := range spec.Schemas { + if schema.EffectiveDate == "" { + return ErrEffectiveDateRequired + } + if schema.Version == "" { + return ErrSchemaVersionRequired + } + } + + if spec.Secret.Name == "" { + return ErrSecretNameRequired + } + + if spec.Secret.Type == "" { + return ErrSecretTypeRequired + } + validTypes := []string{"azure", "gcs", "s3", "swift", "alibabacloud"} + if !slices.Contains(validTypes, spec.Secret.Type) { + return ErrInvalidSecretType + } + + if spec.TLS != nil && spec.TLS.CA == "" { + return ErrCANameRequired + } + + return nil +} + +func getLokiStackObjectStorageSpec(telemetryObjectStorageSpec telemetryv1.ObjectStorageSpec) lokistackv1.ObjectStorageSpec { + var result lokistackv1.ObjectStorageSpec + + if len(telemetryObjectStorageSpec.Schemas) == 0 { + // NOTE: if no schema is defined, use the same as defined in loki-operator. 
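+		// An explicit list in spec.s3StorageConfig.schemas overrides this default; the
+		// version and effectiveDate strings are passed through verbatim to the LokiStack
+		// CR, so any schema version supported by the installed loki-operator can be used.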
+ result.Schemas = []lokistackv1.ObjectStorageSchema{ + { + Version: lokistackv1.ObjectStorageSchemaVersion("v11"), + EffectiveDate: lokistackv1.StorageSchemaEffectiveDate("2020-10-11"), + }, + } + } else { + for _, schema := range telemetryObjectStorageSpec.Schemas { + result.Schemas = append(result.Schemas, lokistackv1.ObjectStorageSchema{ + Version: lokistackv1.ObjectStorageSchemaVersion(schema.Version), + EffectiveDate: lokistackv1.StorageSchemaEffectiveDate(schema.EffectiveDate), + }) + } + } + + result.Secret.Type = lokistackv1.ObjectStorageSecretType(telemetryObjectStorageSpec.Secret.Type) + result.Secret.Name = telemetryObjectStorageSpec.Secret.Name + result.Secret.CredentialMode = lokistackv1.CredentialMode(telemetryObjectStorageSpec.Secret.CredentialMode) + + if telemetryObjectStorageSpec.TLS != nil { + result.TLS = &lokistackv1.ObjectStorageTLSSpec{ + CASpec: lokistackv1.CASpec{ + CAKey: telemetryObjectStorageSpec.TLS.CAKey, + CA: telemetryObjectStorageSpec.TLS.CA, + }, + } + if result.TLS.CAKey == "" { + // NOTE: if no CAKey is defined, use the same as defined in loki-operator + result.TLS.CAKey = "service-ca.crt" + } + } + + return result +} + +// LokiStack defines a lokistack for cloudkitty +func LokiStack( + instance *telemetryv1.CloudKitty, + labels map[string]string, +) (*lokistackv1.LokiStack, error) { + err := validateObjectStorageSpec(instance.Spec.S3StorageConfig) + if err != nil { + return nil, err + } + size := "1x.demo" + if instance.Spec.LokiStackSize != "" { + size = instance.Spec.LokiStackSize + } + lokiStack := &lokistackv1.LokiStack{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-lokistack", instance.Name), + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: lokistackv1.LokiStackSpec{ + // TODO: What size do we even want? 
I assume something + // smallish since only rating interact with this + Size: lokistackv1.LokiStackSizeType(size), + Storage: getLokiStackObjectStorageSpec(instance.Spec.S3StorageConfig), + StorageClassName: instance.Spec.StorageClass, + Tenants: &lokistackv1.TenantsSpec{ + Mode: lokistackv1.Static, + Authentication: []lokistackv1.AuthenticationSpec{ + { + TenantName: instance.Name, + TenantID: instance.Name, + MTLS: &lokistackv1.MTLSSpec{ + CA: &lokistackv1.CASpec{ + CAKey: CaConfigmapKey, + CA: fmt.Sprintf("%s-%s", instance.Name, CaConfigmapName), + }, + }, + }, + }, + Authorization: &lokistackv1.AuthorizationSpec{ + // TODO: Determine what exactly this does and what's needed here + Roles: []lokistackv1.RoleSpec{ + { + Name: "cloudkitty-logs", + Resources: []string{ + "logs", + }, + Tenants: []string{ + "cloudkitty", + }, + Permissions: []lokistackv1.PermissionType{ + lokistackv1.Write, + lokistackv1.Read, + }, + }, + { + Name: "cluster-reader", + Resources: []string{ + "logs", + }, + Tenants: []string{ + "cloudkitty", + }, + Permissions: []lokistackv1.PermissionType{ + lokistackv1.Read, + }, + }, + }, + RoleBindings: []lokistackv1.RoleBindingsSpec{ + { + Name: "cloudkitty-logs", + Subjects: []lokistackv1.Subject{ + { + Name: "cloudkitty", + Kind: lokistackv1.Group, + }, + }, + Roles: []string{ + "cloudkitty-logs", + }, + }, + { + Name: "cluster-reader", + Subjects: []lokistackv1.Subject{ + { + Name: "cloudkitty-logs-admin", + Kind: lokistackv1.Group, + }, + }, + Roles: []string{ + "cluster-reader", + }, + }, + }, + }, + }, + }, + } + return lokiStack, nil +} diff --git a/pkg/cloudkitty/storageinit.go b/pkg/cloudkitty/storageinit.go new file mode 100644 index 00000000..21273920 --- /dev/null +++ b/pkg/cloudkitty/storageinit.go @@ -0,0 +1,113 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cloudkitty provides CloudKitty service configuration and management utilities +package cloudkitty + +import ( + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +const ( + // storageInitCommand - + // TODO: Once we work on update/upgrades revisit the command in the + // the cloudkitty-storageinit-config.json file. + // If we stop all services during the update/upgrade then we can keep + // the --bump-versions flag. + // If we are doing rolling upgrades we'll need to use the flag + // conditionally (only for adoption) and do the restart cycle of + // services as described in the upstream rolling upgrades process. 
+ storageInitCommand = "/usr/local/bin/kolla_start" +) + +// StorageInitJob func +func StorageInitJob(instance *telemetryv1.CloudKitty, labels map[string]string, annotations map[string]string) *batchv1.Job { + args := []string{"-c", storageInitCommand} + + // create Volume and VolumeMounts + volumes := GetVolumes(instance.Name) + volumeMounts := GetVolumeMounts("cloudkitty-storageinit") + // add CA cert if defined + if instance.Spec.CloudKittyAPI.TLS.CaBundleSecretName != "" { + volumes = append(volumes, instance.Spec.CloudKittyAPI.TLS.CreateVolume()) + volumeMounts = append(volumeMounts, instance.Spec.CloudKittyAPI.TLS.CreateVolumeMounts(nil)...) + } + + runAsUser := int64(CloudKittyUserID) + envVars := map[string]env.Setter{} + envVars["KOLLA_CONFIG_STRATEGY"] = env.SetValue("COPY_ALWAYS") + envVars["KOLLA_BOOTSTRAP"] = env.SetValue("TRUE") + cloudKittyPassword := []corev1.EnvVar{ + { + Name: "CloudKittyPassword", + ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: instance.Spec.Secret, + }, + Key: instance.Spec.PasswordSelectors.CloudKittyService, + }, + }, + }, + } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: ServiceName + "-storageinit", + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: instance.RbacResourceName(), + Containers: []corev1.Container{ + { + Name: ServiceName + "-storageinit", + Command: []string{ + "/bin/bash", + }, + Args: args, + Image: instance.Spec.CloudKittyAPI.ContainerImage, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &runAsUser, + RunAsNonRoot: ptr.To(true), + }, + Env: env.MergeEnvs(cloudKittyPassword, envVars), + VolumeMounts: volumeMounts, + }, + }, + Volumes: volumes, + }, + }, + }, + } + + if instance.Spec.NodeSelector != nil { + job.Spec.Template.Spec.NodeSelector = *instance.Spec.NodeSelector + } + + return job +} diff --git a/pkg/cloudkitty/volumes.go b/pkg/cloudkitty/volumes.go new file mode 100644 index 00000000..23478a3a --- /dev/null +++ b/pkg/cloudkitty/volumes.go @@ -0,0 +1,86 @@ +package cloudkitty + +import ( + corev1 "k8s.io/api/core/v1" +) + +var ( + // scriptMode is the default permissions mode for Scripts volume + scriptMode int32 = 0755 + // configMode is the 640 permissions mode + configMode int32 = 0640 + // certMode is the 400 permissions mode + certMode int32 = 0400 +) + +// GetVolumes - service volumes +func GetVolumes(name string) []corev1.Volume { + return []corev1.Volume{ + { + Name: "scripts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &scriptMode, + SecretName: name + "-scripts", + }, + }, + }, { + Name: "config-data", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &configMode, + SecretName: name + "-config-data", + }, + }, + }, { + Name: "certs", + VolumeSource: corev1.VolumeSource{ + Projected: &corev1.ProjectedVolumeSource{ + Sources: []corev1.VolumeProjection{ + { + Secret: &corev1.SecretProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: ClientCertSecretName, + }, + }, + }, { + ConfigMap: &corev1.ConfigMapProjection{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: name + "-lokistack-gateway-ca-bundle", + }, + }, + }, + }, + DefaultMode: &certMode, + }, + }, + }, + 
} +} + +// GetVolumeMounts - general VolumeMounts +func GetVolumeMounts(serviceName string) []corev1.VolumeMount { + return []corev1.VolumeMount{ + { + Name: "scripts", + MountPath: "/var/lib/openstack/bin", + ReadOnly: true, + }, + { + Name: "config-data", + MountPath: "/var/lib/openstack/config", + ReadOnly: true, + }, + { + Name: "config-data", + MountPath: "/var/lib/kolla/config_files/config.json", + SubPath: serviceName + "-config.json", + ReadOnly: true, + }, + { + Name: "certs", + MountPath: "/var/lib/openstack/loki-certs", + ReadOnly: true, + }, + } +} diff --git a/pkg/cloudkittyapi/const.go b/pkg/cloudkittyapi/const.go new file mode 100644 index 00000000..e82c5afa --- /dev/null +++ b/pkg/cloudkittyapi/const.go @@ -0,0 +1,25 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cloudkittyapi provides CloudKitty API service configuration and management utilities +package cloudkittyapi + +const ( + // ComponentName - + ComponentName = "cloudkitty-api" + + //LogFile - + LogFile = "/var/log/cloudkitty/cloudkitty-api.log" +) diff --git a/pkg/cloudkittyapi/statefulset.go b/pkg/cloudkittyapi/statefulset.go new file mode 100644 index 00000000..21db00aa --- /dev/null +++ b/pkg/cloudkittyapi/statefulset.go @@ -0,0 +1,186 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cloudkittyapi + +import ( + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common/env" + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1" + "github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkitty" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" +) + +const ( + // ServiceCommand - + ServiceCommand = "/usr/local/bin/kolla_start" +) + +// StatefulSet func +func StatefulSet( + instance *telemetryv1.CloudKittyAPI, + configHash string, + labels map[string]string, + annotations map[string]string, + topology *topologyv1.Topology, +) (*appsv1.StatefulSet, error) { + runAsUser := int64(cloudkitty.CloudKittyUserID) + + livenessProbe := &corev1.Probe{ + // TODO might need tuning + TimeoutSeconds: 5, + PeriodSeconds: 5, + InitialDelaySeconds: 30, + } + readinessProbe := &corev1.Probe{ + // TODO might need tuning + TimeoutSeconds: 5, + PeriodSeconds: 5, + InitialDelaySeconds: 30, + } + + args := []string{"-c", ServiceCommand} + // + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + // + livenessProbe.HTTPGet = &corev1.HTTPGetAction{ + Path: "/healthcheck", + Port: intstr.IntOrString{Type: intstr.Int, IntVal: int32(cloudkitty.CloudKittyPublicPort)}, + } + readinessProbe.HTTPGet = livenessProbe.HTTPGet + + if instance.Spec.TLS.API.Enabled(service.EndpointPublic) { + livenessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + readinessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + } + + // create Volume and VolumeMounts + volumes := GetVolumes(cloudkitty.GetOwningCloudKittyName(instance), instance.Name) + volumeMounts := GetVolumeMounts() + + // add CA cert if defined + if instance.Spec.TLS.CaBundleSecretName != "" { + volumes = append(volumes, instance.Spec.TLS.CreateVolume()) + volumeMounts = append(volumeMounts, instance.Spec.TLS.CreateVolumeMounts(nil)...) + } + + for _, endpt := range []service.Endpoint{service.EndpointInternal, service.EndpointPublic} { + if instance.Spec.TLS.API.Enabled(endpt) { + var tlsEndptCfg tls.GenericService + switch endpt { + case service.EndpointPublic: + tlsEndptCfg = instance.Spec.TLS.API.Public + case service.EndpointInternal: + tlsEndptCfg = instance.Spec.TLS.API.Internal + } + + svc, err := tlsEndptCfg.ToService() + if err != nil { + return nil, err + } + volumes = append(volumes, svc.CreateVolume(endpt.String())) + volumeMounts = append(volumeMounts, svc.CreateVolumeMounts(endpt.String())...) 
+		}
+	}
+
+	envVars := map[string]env.Setter{}
+	envVars["KOLLA_CONFIG_STRATEGY"] = env.SetValue("COPY_ALWAYS")
+	envVars["CONFIG_HASH"] = env.SetValue(configHash)
+
+	statefulset := &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      instance.Name,
+			Namespace: instance.Namespace,
+			Labels:    labels,
+		},
+		Spec: appsv1.StatefulSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+			PodManagementPolicy: appsv1.ParallelPodManagement,
+			Replicas:            instance.Spec.Replicas,
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: annotations,
+					Labels:      labels,
+				},
+				Spec: corev1.PodSpec{
+					ServiceAccountName: instance.Spec.ServiceAccount,
+					Containers: []corev1.Container{
+						// The first container in a pod is the default selected
+						// by oc logs, so define the log stream container first.
+						{
+							Name: instance.Name + "-log",
+							Command: []string{
+								"/usr/bin/dumb-init",
+							},
+							Args: []string{
+								"--single-child",
+								"--",
+								"/bin/sh",
+								"-c",
+								"/usr/bin/tail -n+1 -F " + LogFile + " 2>/dev/null",
+							},
+							Image:        instance.Spec.ContainerImage,
+							Env:          env.MergeEnvs([]corev1.EnvVar{}, envVars),
+							VolumeMounts: []corev1.VolumeMount{GetLogVolumeMount()},
+							Resources:    instance.Spec.Resources,
+						},
+						{
+							Name: ComponentName,
+							Command: []string{
+								"/bin/bash",
+							},
+							Args:           args,
+							Image:          instance.Spec.ContainerImage,
+							Env:            env.MergeEnvs([]corev1.EnvVar{}, envVars),
+							VolumeMounts:   volumeMounts,
+							Resources:      instance.Spec.Resources,
+							ReadinessProbe: readinessProbe,
+							LivenessProbe:  livenessProbe,
+						},
+					},
+					SecurityContext: &corev1.PodSecurityContext{
+						RunAsUser:    &runAsUser,
+						RunAsNonRoot: ptr.To(true),
+					},
+					Volumes: volumes,
+				},
+			},
+		},
+	}
+
+	if instance.Spec.NodeSelector != nil {
+		statefulset.Spec.Template.Spec.NodeSelector = *instance.Spec.NodeSelector
+	}
+
+	if topology != nil {
+		topology.ApplyTo(&statefulset.Spec.Template)
+	} else {
+		// If possible two pods of the same service should not
+		// run on the same worker node. If this is not possible
+		// they still get created on the same worker node.
+		statefulset.Spec.Template.Spec.Affinity = cloudkitty.GetPodAffinity(ComponentName)
+	}
+
+	return statefulset, nil
+}
diff --git a/pkg/cloudkittyapi/volumes.go b/pkg/cloudkittyapi/volumes.go
new file mode 100644
index 00000000..bebdd2a8
--- /dev/null
+++ b/pkg/cloudkittyapi/volumes.go
@@ -0,0 +1,54 @@
+package cloudkittyapi
+
+import (
+	"github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkitty"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// GetVolumes - CloudKitty API Volumes
+func GetVolumes(parentName string, name string) []corev1.Volume {
+	var config0644AccessMode int32 = 0644
+
+	volumes := []corev1.Volume{
+		{
+			Name: "config-data-custom",
+			VolumeSource: corev1.VolumeSource{
+				Secret: &corev1.SecretVolumeSource{
+					DefaultMode: &config0644AccessMode,
+					SecretName:  name + "-config-data",
+				},
+			},
+		},
+		{
+			Name: "logs",
+			VolumeSource: corev1.VolumeSource{
+				EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""},
+			},
+		},
+	}
+
+	return append(cloudkitty.GetVolumes(parentName), volumes...)
+}
+
+// GetVolumeMounts - CloudKitty API VolumeMounts
+func GetVolumeMounts() []corev1.VolumeMount {
+	volumeMounts := []corev1.VolumeMount{
+		{
+			Name:      "config-data-custom",
+			MountPath: "/var/lib/openstack/service-config/",
+			ReadOnly:  true,
+		},
+		GetLogVolumeMount(),
+	}
+
+	return append(cloudkitty.GetVolumeMounts(cloudkitty.ServiceName+"-api"), volumeMounts...)
+} + +// GetLogVolumeMount - CloudKitty API LogVolumeMount +func GetLogVolumeMount() corev1.VolumeMount { + return corev1.VolumeMount{ + Name: "logs", + MountPath: "/var/log/cloudkitty", + ReadOnly: false, + } +} diff --git a/pkg/cloudkittyproc/const.go b/pkg/cloudkittyproc/const.go new file mode 100644 index 00000000..c8fda2aa --- /dev/null +++ b/pkg/cloudkittyproc/const.go @@ -0,0 +1,22 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cloudkittyproc provides CloudKitty processor service configuration and management utilities +package cloudkittyproc + +const ( + // ComponentName - + ComponentName = "cloudkitty-proc" +) diff --git a/pkg/cloudkittyproc/statefulset.go b/pkg/cloudkittyproc/statefulset.go new file mode 100644 index 00000000..b68b67f6 --- /dev/null +++ b/pkg/cloudkittyproc/statefulset.go @@ -0,0 +1,136 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cloudkittyproc
+
+import (
+	topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1"
+	"github.com/openstack-k8s-operators/lib-common/modules/common/env"
+	telemetryv1 "github.com/openstack-k8s-operators/telemetry-operator/api/v1beta1"
+	"github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkitty"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/utils/ptr"
+)
+
+const (
+	// ServiceCommand -
+	ServiceCommand = "/usr/local/bin/kolla_start"
+	// CloudKittyHCScript is the path to the health check script
+	CloudKittyHCScript = "/var/lib/openstack/bin/healthcheck.py"
+)
+
+// StatefulSet func
+func StatefulSet(
+	instance *telemetryv1.CloudKittyProc,
+	configHash string,
+	labels map[string]string,
+	annotations map[string]string,
+	topology *topologyv1.Topology,
+) *appsv1.StatefulSet {
+	runAsUser := int64(cloudkitty.CloudKittyUserID)
+
+	// TODO until we determine how to properly query for these
+	livenessProbe := &corev1.Probe{
+		// TODO might need tuning
+		TimeoutSeconds:      5,
+		PeriodSeconds:       5,
+		InitialDelaySeconds: 3,
+	}
+
+	args := []string{"-c", ServiceCommand}
+	//var probeCommand string
+
+	//probeCommand = "/usr/local/bin/kolla_set_configs && /var/lib/openstack/bin/healthcheck.py --config-dir /etc/cloudkitty/cloudkitty.conf.d/"
+
+	livenessProbe.Exec = &corev1.ExecAction{
+		Command: []string{
+			"/usr/bin/python3",
+			CloudKittyHCScript,
+		},
+	}
+
+	envVars := map[string]env.Setter{}
+	envVars["KOLLA_CONFIG_STRATEGY"] = env.SetValue("COPY_ALWAYS")
+	envVars["CONFIG_HASH"] = env.SetValue(configHash)
+
+	volumes := GetVolumes(cloudkitty.GetOwningCloudKittyName(instance), instance.Name)
+	volumeMounts := GetVolumeMounts()
+
+	// Add the CA bundle
+	if instance.Spec.TLS.CaBundleSecretName != "" {
+		volumes = append(volumes, instance.Spec.TLS.CreateVolume())
+		volumeMounts = append(volumeMounts, instance.Spec.TLS.CreateVolumeMounts(nil)...)
+	}
+
+	statefulset := &appsv1.StatefulSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      instance.Name,
+			Namespace: instance.Namespace,
+			Labels:    labels,
+		},
+		Spec: appsv1.StatefulSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: labels,
+			},
+			Replicas: instance.Spec.Replicas,
+			Template: corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: annotations,
+					Labels:      labels,
+				},
+				Spec: corev1.PodSpec{
+					ServiceAccountName: instance.Spec.ServiceAccount,
+					Containers: []corev1.Container{
+						{
+							Name: ComponentName,
+							Command: []string{
+								"/bin/bash",
+							},
+							Args:  args,
+							Image: instance.Spec.ContainerImage,
+							SecurityContext: &corev1.SecurityContext{
+								RunAsUser:    &runAsUser,
+								RunAsNonRoot: ptr.To(true),
+							},
+							Env:           env.MergeEnvs([]corev1.EnvVar{}, envVars),
+							VolumeMounts:  volumeMounts,
+							Resources:     instance.Spec.Resources,
+							LivenessProbe: livenessProbe,
+						},
+					},
+					Volumes: volumes,
+				},
+			},
+		},
+	}
+
+	if instance.Spec.NodeSelector != nil {
+		statefulset.Spec.Template.Spec.NodeSelector = *instance.Spec.NodeSelector
+	}
+
+	if topology != nil {
+		topology.ApplyTo(&statefulset.Spec.Template)
+	} else {
+		// If possible two pods of the same service should not
+		// run on the same worker node. If this is not possible
+		// they still get created on the same worker node.
+ statefulset.Spec.Template.Spec.Affinity = cloudkitty.GetPodAffinity(ComponentName) + } + + return statefulset +} diff --git a/pkg/cloudkittyproc/volumes.go b/pkg/cloudkittyproc/volumes.go new file mode 100644 index 00000000..b2b9c071 --- /dev/null +++ b/pkg/cloudkittyproc/volumes.go @@ -0,0 +1,38 @@ +package cloudkittyproc + +import ( + "github.com/openstack-k8s-operators/telemetry-operator/pkg/cloudkitty" + corev1 "k8s.io/api/core/v1" +) + +// GetVolumes - +func GetVolumes(parentName string, name string) []corev1.Volume { + var config0644AccessMode int32 = 0644 + + volumes := []corev1.Volume{ + { + Name: "config-data-custom", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &config0644AccessMode, + SecretName: name + "-config-data", + }, + }, + }, + } + + return append(cloudkitty.GetVolumes(parentName), volumes...) +} + +// GetVolumeMounts - CloudKitty API VolumeMounts +func GetVolumeMounts() []corev1.VolumeMount { + volumeMounts := []corev1.VolumeMount{ + { + Name: "config-data-custom", + MountPath: "/var/lib/openstack/service-config/", + ReadOnly: true, + }, + } + + return append(cloudkitty.GetVolumeMounts(cloudkitty.ServiceName+"-proc"), volumeMounts...) +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 111d9882..0461d056 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -19,14 +19,35 @@ package utils //nolint:revive // utils is a legitimate package name for utility import ( "context" + "fmt" k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/source" "github.com/openstack-k8s-operators/lib-common/modules/common/helper" ) +// ConditionalWatchingReconciler is a reconciler that can conditionally watch resources +type ConditionalWatchingReconciler struct { + client.Client + Kclient kubernetes.Interface + Scheme *runtime.Scheme + Controller controller.Controller + Watching []string + RESTMapper meta.RESTMapper + Cache cache.Cache +} + // EnsureDeleted - Delete the object which in turn will clean the sub resources func EnsureDeleted(ctx context.Context, helper *helper.Helper, obj client.Object) (ctrl.Result, error) { key := client.ObjectKeyFromObject(obj) @@ -45,3 +66,41 @@ func EnsureDeleted(ctx context.Context, helper *helper.Helper, obj client.Object return ctrl.Result{}, nil } + +// EnsureWatches ensures that a watch is set up for a given resource +func EnsureWatches( + _ context.Context, + r *ConditionalWatchingReconciler, + name string, + kind client.Object, + handler handler.EventHandler, + helper *helper.Helper, +) error { + Log := helper.GetLogger() + for _, item := range r.Watching { + if item == name { + // We are already watching the resource + return nil + } + } + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(schema.GroupVersionKind{ + Group: "apiextensions.k8s.io", + Kind: "CustomResourceDefinition", + Version: "v1", + }) + + err := r.Get(context.Background(), client.ObjectKey{ + Name: name, + }, u) + if err != nil { + return err + } + + Log.Info(fmt.Sprintf("Starting to watch %s", name)) + err = r.Controller.Watch(source.Kind(r.Cache, kind, 
handler))
+	if err == nil {
+		r.Watching = append(r.Watching, name)
+	}
+	return err
+}
diff --git a/templates/cloudkitty/bin/healthcheck.py b/templates/cloudkitty/bin/healthcheck.py
new file mode 100755
index 00000000..a1110d7a
--- /dev/null
+++ b/templates/cloudkitty/bin/healthcheck.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright 2025 Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import psutil
+
+
+def check_process() -> tuple[int, str]:
+    # Return 0 if a cloudkitty-processor process exists, else 1 with a reason.
+    for proc in psutil.process_iter(attrs=["name", "cmdline"]):
+        try:
+            cmdline = proc.info.get("cmdline", [])
+            if cmdline and any("cloudkitty-processor" in arg for arg in cmdline):
+                return 0, ""
+        except (psutil.NoSuchProcess, psutil.AccessDenied):
+            continue
+    return 1, "CloudKitty processor process not found"
+
+
+def run_checks() -> tuple[int, str]:
+    # Run all health checks and return the aggregated result
+    checks = [check_process]
+    for check in checks:
+        rc, reason = check()
+        if rc != 0:
+            return rc, reason
+    return 0, ""
+
+
+if __name__ == "__main__":
+    try:
+        rc, reason = run_checks()
+    except Exception as ex:
+        rc, reason = 2, f"Unknown error: {ex}"
+
+    if rc != 0:
+        print(reason)
+        sys.exit(rc)
diff --git a/templates/cloudkitty/bin/run-on-host b/templates/cloudkitty/bin/run-on-host
new file mode 100755
index 00000000..e7840ace
--- /dev/null
+++ b/templates/cloudkitty/bin/run-on-host
@@ -0,0 +1,2 @@
+#!/bin/sh
+exec nsenter -a -t 1 -- `realpath -s $0` "$@"
diff --git a/templates/cloudkitty/config/cloudkitty-api-config.json b/templates/cloudkitty/config/cloudkitty-api-config.json
new file mode 100644
index 00000000..2c691726
--- /dev/null
+++ b/templates/cloudkitty/config/cloudkitty-api-config.json
@@ -0,0 +1,93 @@
+{
+    "command": "/usr/sbin/httpd -DFOREGROUND -E /dev/stdout",
+    "config_files": [
+        {
+            "source": "/var/lib/openstack/service-config/cloudkitty.conf",
+            "dest": "/etc/cloudkitty/cloudkitty.conf.d/00-cloudkitty.conf",
+            "owner": "cloudkitty",
+            "perm": "0600"
+        },
+        {
+            "source": "/var/lib/openstack/config/metrics.yaml",
+            "dest": "/etc/cloudkitty/metrics.yaml",
+            "owner": "cloudkitty",
+            "perm": "0600"
+        },
+        {
+            "source": "/var/lib/openstack/service-config/0*.conf",
+            "dest": "/etc/cloudkitty/cloudkitty.conf.d/",
+            "owner": "cloudkitty",
+            "perm": "0600",
+            "optional": true
+        },
+        {
+            "source": "/var/lib/openstack/config/wsgi-cloudkitty.conf",
+            "dest": "/etc/httpd/conf.d/00wsgi-cloudkitty.conf",
+            "owner": "cloudkitty",
+            "perm": "0644"
+        },
+        {
+            "source": "/var/lib/openstack/config/httpd.conf",
+            "dest": "/etc/httpd/conf/httpd.conf",
+            "owner": "cloudkitty",
+            "perm": "0644"
+ }, + { + "source": "/var/lib/openstack/config/ssl.conf", + "dest": "/etc/httpd/conf.d/ssl.conf", + "owner": "cloudkitty", + "perm": "0644" + }, + { + "source": "/var/lib/config-data/tls/certs/*", + "dest": "/etc/pki/tls/certs/", + "owner": "cloudkitty", + "perm": "0640", + "optional": true, + "merge": true + }, + { + "source": "/var/lib/config-data/tls/private/*", + "dest": "/etc/pki/tls/private/", + "owner": "cloudkitty", + "perm": "0600", + "optional": true, + "merge": true + }, + { + "source": "/var/lib/config-data/mtls/certs/*", + "dest": "/etc/pki/tls/certs/", + "owner": "cloudkitty:cloudkitty", + "perm": "0640", + "optional": true, + "merge": true + }, + { + "source": "/var/lib/config-data/mtls/private/*", + "dest": "/etc/pki/tls/private/", + "owner": "cloudkitty:cloudkitty", + "perm": "0640", + "optional": true, + "merge": true + }, + { + "source": "/var/lib/openstack/loki-certs/*", + "dest": "/etc/cloudkitty/certs/", + "owner": "cloudkitty:cloudkitty", + "perm": "0400", + "merge": true + } + ], + "permissions": [ + { + "path": "/var/log/cloudkitty", + "owner": "cloudkitty:apache", + "recurse": true + }, + { + "path": "/etc/httpd/run", + "owner": "cloudkitty:apache", + "recurse": true + } + ] +} diff --git a/templates/cloudkitty/config/cloudkitty-dbsync-config.json b/templates/cloudkitty/config/cloudkitty-dbsync-config.json new file mode 100644 index 00000000..1eb4f0a6 --- /dev/null +++ b/templates/cloudkitty/config/cloudkitty-dbsync-config.json @@ -0,0 +1,11 @@ +{ + "command": "/usr/bin/cloudkitty-dbsync upgrade", + "config_files": [ + { + "source": "/var/lib/openstack/config/cloudkitty.conf", + "dest": "/etc/cloudkitty/cloudkitty.conf", + "owner": "cloudkitty", + "perm": "0600" + } + ] +} diff --git a/templates/cloudkitty/config/cloudkitty-proc-config.json b/templates/cloudkitty/config/cloudkitty-proc-config.json new file mode 100644 index 00000000..d27258dc --- /dev/null +++ b/templates/cloudkitty/config/cloudkitty-proc-config.json @@ -0,0 +1,31 @@ +{ + "command": "/usr/bin/cloudkitty-processor --logfile /dev/stdout", + "config_files": [ + { + "source": "/var/lib/openstack/service-config/cloudkitty.conf", + "dest": "/etc/cloudkitty/cloudkitty.conf.d/00-cloudkitty.conf", + "owner": "cloudkitty", + "perm": "0600" + }, + { + "source": "/var/lib/openstack/service-config/0*.conf", + "dest": "/etc/cloudkitty/cloudkitty.conf.d/", + "owner": "cloudkitty", + "perm": "0600", + "optional": true + }, + { + "source": "/var/lib/openstack/config/metrics.yaml", + "dest": "/etc/cloudkitty/metrics.yaml", + "owner": "cloudkitty", + "perm": "0600" + }, + { + "source": "/var/lib/openstack/loki-certs/*", + "dest": "/etc/cloudkitty/certs/", + "owner": "cloudkitty:cloudkitty", + "perm": "0400", + "merge": true + } + ] +} diff --git a/templates/cloudkitty/config/cloudkitty-storageinit-config.json b/templates/cloudkitty/config/cloudkitty-storageinit-config.json new file mode 100644 index 00000000..3d9561c5 --- /dev/null +++ b/templates/cloudkitty/config/cloudkitty-storageinit-config.json @@ -0,0 +1,11 @@ +{ + "command": "/usr/bin/cloudkitty-storage-init", + "config_files": [ + { + "source": "/var/lib/openstack/config/cloudkitty.conf", + "dest": "/etc/cloudkitty/cloudkitty.conf", + "owner": "cloudkitty", + "perm": "0600" + } + ] +} diff --git a/templates/cloudkitty/config/cloudkitty.conf b/templates/cloudkitty/config/cloudkitty.conf new file mode 100644 index 00000000..80ba122e --- /dev/null +++ b/templates/cloudkitty/config/cloudkitty.conf @@ -0,0 +1,84 @@ +[oslo_policy] +policy_file = policy.yaml + 
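+# NOTE: the {{ ... }} expressions below are Go template fields (transport URL,
+# Keystone endpoints, service credentials, Prometheus/Loki endpoints, database
+# connection) that are expected to be substituted by the operator when this
+# template is rendered into the service configuration; they are not literal values.
+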
+[DEFAULT] +auth_strategy = keystone +debug = True +notification_topics = notifications +transport_url = {{ .TransportURL }} + +[authinfos] +debug = True +project_domain_name = default +user_domain_name = default +region_name = RegionOne +tenant_name = service +project_name = service +password = {{ .ServicePassword }} +username = {{ .ServiceUser }} +identity_uri = {{ .KeystoneInternalURL }} +auth_url = {{ .KeystoneInternalURL }} +auth_protocol = http +auth_type = v3password +{{- if .TLS }} +cafile = {{ .CAFile }} +{{- end }} + +[fetcher] +backend = keystone + +[fetcher_keystone] +auth_section = authinfos +ignore_rating_role = True + +[collect] +period = {{ .Period }} +wait_periods = 0 +metrics_conf = /etc/cloudkitty/metrics.yaml +collector = prometheus +scope_key = project + +[collector_prometheus] +{{- if .PrometheusTLS }} +prometheus_url = https://{{ .PrometheusHost }}:{{ .PrometheusPort }}/api/v1 +cafile = {{ .PrometheusCAFile }} +insecure = false +{{- else }} +prometheus_url = http://{{ .PrometheusHost }}:{{ .PrometheusPort }}/api/v1 +insecure = true +{{- end }} + +[output] +pipeline = osrf +basepath = /opt/stack/data/cloudkitty/reports +backend = cloudkitty.backend.file.FileBackend + +[storage] +version = 2 +backend = loki + +[storage_loki] +url = https://{{ .LokiHost }}:{{ .LokiPort }}/api/logs/v1/cloudkitty/loki/api/v1 +ca_file = /etc/cloudkitty/certs/service-ca.crt +cert_file = /etc/cloudkitty/certs/tls.crt +key_file = /etc/cloudkitty/certs/tls.key + +[database] +connection = {{ .DatabaseConnection }} + +[keystone_authtoken] +memcached_servers = {{ .MemcachedServersWithInet }} +# memcache_pool_dead_retry = 10 +# memcache_pool_conn_get_timeout = 2 +project_domain_name = Default +project_name = service +user_domain_name = Default +password = {{ .ServicePassword }} +username = {{ .ServiceUser }} +auth_url = {{ .KeystoneInternalURL }} +interface = internal +auth_type = password +{{- if .TLS }} +cafile = {{ .CAFile }} +{{- end }} +# service_token_roles_required = true diff --git a/templates/cloudkitty/config/httpd.conf b/templates/cloudkitty/config/httpd.conf new file mode 100644 index 00000000..c742ea69 --- /dev/null +++ b/templates/cloudkitty/config/httpd.conf @@ -0,0 +1,27 @@ +ServerTokens Prod +ServerSignature Off +TraceEnable Off +ServerRoot "/etc/httpd" +ServerName "cloudkitty.openstack.svc" + +User apache +Group apache + +Listen 8889 + +TypesConfig /etc/mime.types + +Include conf.modules.d/*.conf + +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined +LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy + +SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded +CustomLog /dev/stdout combined env=!forwarded +CustomLog /dev/stdout proxy env=forwarded +ErrorLog /dev/stdout + +# XXX: To disable SSL +#Include conf.d/*.conf +# If above include is commented include at least the cloudkitty wsgi file +Include conf.d/00wsgi-cloudkitty.conf diff --git a/templates/cloudkitty/config/metrics.yaml b/templates/cloudkitty/config/metrics.yaml new file mode 100644 index 00000000..6bb078af --- /dev/null +++ b/templates/cloudkitty/config/metrics.yaml @@ -0,0 +1,77 @@ +metrics: + ceilometer_cpu: + unit: instance + alt_name: instance + groupby: + - id + - user_id + - project + metadata: + - flavor_name + - flavor_id + - vcpus + mutate: NUMBOOL + extra_args: + aggregation_method: max + + ceilometer_image_size: + unit: MiB + factor: 1/1048576 + groupby: + - id + - user_id + - project + metadata: + - container_format + - disk_format + 
extra_args: + aggregation_method: max + + ceilometer_volume_size: + unit: GiB + groupby: + - id + - user_id + - project + metadata: + - volume_type + extra_args: + aggregation_method: max + + ceilometer_network_outgoing_bytes_rate: + unit: MB + groupby: + - id + - project + - user_id + # Converting B/s to MB/h + factor: 3600/1000000 + metadata: + - instance_id + extra_args: + aggregation_method: max + + ceilometer_network_incoming_bytes_rate: + unit: MB + groupby: + - id + - project + - user_id + # Converting B/s to MB/h + factor: 3600/1000000 + metadata: + - instance_id + extra_args: + aggregation_method: max + + ceilometer_ip_floating: + unit: ip + groupby: + - id + - user_id + - project + metadata: + - state + mutate: NUMBOOL + extra_args: + aggregation_method: max diff --git a/templates/cloudkitty/config/ssl.conf b/templates/cloudkitty/config/ssl.conf new file mode 100644 index 00000000..e3da4ecb --- /dev/null +++ b/templates/cloudkitty/config/ssl.conf @@ -0,0 +1,21 @@ + + SSLRandomSeed startup builtin + SSLRandomSeed startup file:/dev/urandom 512 + SSLRandomSeed connect builtin + SSLRandomSeed connect file:/dev/urandom 512 + + AddType application/x-x509-ca-cert .crt + AddType application/x-pkcs7-crl .crl + + SSLPassPhraseDialog builtin + SSLSessionCache "shmcb:/var/cache/mod_ssl/scache(512000)" + SSLSessionCacheTimeout 300 + Mutex default + SSLCryptoDevice builtin + SSLHonorCipherOrder On + SSLUseStapling Off + SSLStaplingCache "shmcb:/run/httpd/ssl_stapling(32768)" + SSLCipherSuite HIGH:MEDIUM:!aNULL:!MD5:!RC4:!3DES + SSLProtocol all -SSLv2 -SSLv3 -TLSv1 + SSLOptions StdEnvVars + diff --git a/templates/cloudkitty/config/wsgi-cloudkitty.conf b/templates/cloudkitty/config/wsgi-cloudkitty.conf new file mode 100644 index 00000000..7559d46c --- /dev/null +++ b/templates/cloudkitty/config/wsgi-cloudkitty.conf @@ -0,0 +1,40 @@ +{{ range $endpt, $vhost := .VHosts }} +# {{ $endpt }} vhost {{ $vhost.ServerName }} configuration + + ServerName {{ $vhost.ServerName }} + + ## Vhost docroot + DocumentRoot "/var/www/cgi-bin/cloudkitty" + + ## Directories, there should at least be a declaration for /var/www/cgi-bin/cloudkitty + + + Options -Indexes +FollowSymLinks +MultiViews + AllowOverride None + Require all granted + + + Timeout {{ $.TimeOut }} + + ## Logging + ErrorLog /dev/stdout + ServerSignature Off + CustomLog /dev/stdout combined + +{{- if $vhost.TLS }} + SetEnvIf X-Forwarded-Proto https HTTPS=1 + + ## SSL directives + SSLEngine on + SSLCertificateFile "{{ $vhost.SSLCertificateFile }}" + SSLCertificateKeyFile "{{ $vhost.SSLCertificateKeyFile }}" +{{- end }} + + ## WSGI configuration + WSGIApplicationGroup %{GLOBAL} + WSGIDaemonProcess {{ $endpt }} display-name={{ $endpt }} group=cloudkitty processes=4 threads=1 user=cloudkitty + WSGIProcessGroup {{ $endpt }} + WSGIScriptAlias / "/usr/bin/cloudkitty-api" + WSGIPassAuthorization On + +{{ end }} diff --git a/tests/kuttl/suites/cloudkitty/config.yaml b/tests/kuttl/suites/cloudkitty/config.yaml new file mode 100644 index 00000000..bd1d84da --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/config.yaml @@ -0,0 +1,14 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestSuite +reportFormat: JSON +reportName: kuttl-cloudkitty-results +namespace: telemetry-kuttl-cloudkitty +# we could set this lower, but the initial image pull can take a while +timeout: 300 +parallel: 1 +skipDelete: true +testDirs: + - tests/kuttl/suites/cloudkitty/ +suppress: + - events +artifactsDir: tests/kuttl/suites/cloudkitty/output diff --git 
a/tests/kuttl/suites/cloudkitty/deps/OpenStackControlPlane.yaml b/tests/kuttl/suites/cloudkitty/deps/OpenStackControlPlane.yaml new file mode 100644 index 00000000..f1b9f207 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/deps/OpenStackControlPlane.yaml @@ -0,0 +1,27 @@ +apiVersion: core.openstack.org/v1beta1 +kind: OpenStackControlPlane +metadata: + name: openstack +spec: + storageClass: "crc-csi-hostpath-provisioner" + keystone: + template: + databaseInstance: openstack + secret: osp-secret + ironic: + enabled: false + template: + ironicConductors: [] + manila: + enabled: false + template: + manilaShares: {} + horizon: + enabled: false + nova: + enabled: false + placement: + template: + databaseInstance: openstack + secret: osp-secret + dataplane: diff --git a/tests/kuttl/suites/cloudkitty/deps/infra.yaml b/tests/kuttl/suites/cloudkitty/deps/infra.yaml new file mode 100644 index 00000000..7c47e716 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/deps/infra.yaml @@ -0,0 +1,42 @@ +apiVersion: core.openstack.org/v1beta1 +kind: OpenStackControlPlane +metadata: + name: openstack +spec: + mariadb: + enabled: false + templates: + openstack: + replicas: 0 + openstack-cell1: + replicas: 0 + galera: + enabled: true + templates: + openstack: + replicas: 1 + storageRequest: 500M + openstack-cell1: + replicas: 1 + storageRequest: 500M + secret: osp-secret + secret: osp-secret + rabbitmq: + templates: + rabbitmq: + replicas: 1 + image: quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:41c36935b8b8cd3c5e490d1c03549ba2c0e8ddff50238fb2400d74613aa2e087 + rabbitmq-cell1: + replicas: 1 + memcached: + templates: + memcached: + replicas: 1 + ovn: + enabled: false + template: + ovnController: + external-ids: + ovn-encap-type: geneve + ovs: + enabled: false diff --git a/tests/kuttl/suites/cloudkitty/deps/kustomization.yaml b/tests/kuttl/suites/cloudkitty/deps/kustomization.yaml new file mode 100644 index 00000000..b366cb73 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/deps/kustomization.yaml @@ -0,0 +1,50 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +namespace: telemetry-kuttl-cloudkitty + +secretGenerator: +- literals: + - AdminPassword=password + - DbRootPassword=password + - DatabasePassword=password + - KeystoneDatabasePassword=password + - PlacementPassword=password + - PlacementDatabasePassword=password + - GlancePassword=password + - GlanceDatabasePassword=password + - NeutronPassword=password + - NeutronDatabasePassword=password + - NovaPassword=password + - NovaAPIDatabasePassword=password + - NovaCell0DatabasePassword=password + - NovaCell1DatabasePassword=password + - AodhPassword=password + - AodhDatabasePassword=password + - CeilometerPassword=password + - CeilometerDatabasePassword=password + - HeatPassword=password + - HeatDatabasePassword=password + - HeatAuthEncryptionKey=66699966699966600666999666999666 + - MetadataSecret=42 + - CloudKittyPassword=password + name: osp-secret +generatorOptions: + disableNameSuffixHash: true + labels: + type: osp-secret + +resources: +- namespace.yaml +- OpenStackControlPlane.yaml +- loki-s3-secret.yaml + +patches: +- patch: |- + apiVersion: core.openstack.org/v1beta1 + kind: OpenStackControlPlane + metadata: + name: openstack + spec: + secret: osp-secret +- path: infra.yaml +- path: telemetry.yaml diff --git a/tests/kuttl/suites/cloudkitty/deps/loki-operator.yaml b/tests/kuttl/suites/cloudkitty/deps/loki-operator.yaml new file mode 100644 index 00000000..5b3d937a --- /dev/null +++ 
b/tests/kuttl/suites/cloudkitty/deps/loki-operator.yaml @@ -0,0 +1,27 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: openshift-operators-redhat + labels: + name: openshift-operators-redhat +--- +apiVersion: operators.coreos.com/v1 +kind: OperatorGroup +metadata: + name: loki-operator + namespace: openshift-operators-redhat +spec: + upgradeStrategy: Default +--- +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: loki-operator + namespace: openshift-operators-redhat +spec: + channel: stable-6.1 + installPlanApproval: Automatic + name: loki-operator + source: redhat-operators + sourceNamespace: openshift-marketplace diff --git a/tests/kuttl/suites/cloudkitty/deps/loki-s3-secret.yaml b/tests/kuttl/suites/cloudkitty/deps/loki-s3-secret.yaml new file mode 100644 index 00000000..b176d7f9 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/deps/loki-s3-secret.yaml @@ -0,0 +1,10 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: logging-loki-s3 +stringData: + access_key_id: minio + access_key_secret: minio123 + bucketnames: loki + endpoint: http://minio.minio-dev.svc.cluster.local:9000 diff --git a/tests/kuttl/suites/cloudkitty/deps/minio.yaml b/tests/kuttl/suites/cloudkitty/deps/minio.yaml new file mode 100644 index 00000000..f1854ff4 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/deps/minio.yaml @@ -0,0 +1,99 @@ +--- +# Deploys a new Namespace for the MinIO Pod +apiVersion: v1 +kind: Namespace +metadata: + name: minio-dev # Change this value if you want a different namespace name + labels: + name: minio-dev # Change this value to match metadata.name +--- +# Deploys a new MinIO Pod into the metadata.namespace Kubernetes namespace +# +apiVersion: v1 +kind: Pod +metadata: + labels: + app: minio + name: minio + namespace: minio-dev # Change this value to match the namespace metadata.name +spec: + containers: + - name: minio + image: quay.io/minio/minio:latest + command: + - /bin/bash + - -c + - | + mkdir -p /data/loki && \ + minio server /data + env: + - name: MINIO_ACCESS_KEY + value: minio + - name: MINIO_SECRET_KEY + value: minio123 + volumeMounts: + - mountPath: /data + name: storage # Corresponds to the `spec.volumes` Persistent Volume + volumes: + - name: storage + persistentVolumeClaim: + claimName: minio-pvc +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: minio-pvc + namespace: minio-dev +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + storageClassName: crc-csi-hostpath-provisioner +--- +apiVersion: v1 +kind: Service +metadata: + name: minio + namespace: minio-dev +spec: + selector: + app: minio + ports: + - name: api + protocol: TCP + port: 9000 + - name: console + protocol: TCP + port: 9090 +--- +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: minio-console + namespace: minio-dev +spec: + host: console-minio-dev.apps-crc.testing + to: + kind: Service + name: minio + weight: 100 + port: + targetPort: console + wildcardPolicy: None +--- +kind: Route +apiVersion: route.openshift.io/v1 +metadata: + name: minio-api + namespace: minio-dev +spec: + host: api-minio-dev.apps-crc.testing + to: + kind: Service + name: minio + weight: 100 + port: + targetPort: api + wildcardPolicy: None diff --git a/tests/kuttl/suites/cloudkitty/deps/namespace.yaml b/tests/kuttl/suites/cloudkitty/deps/namespace.yaml new file mode 100644 index 00000000..63c85762 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/deps/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: 
telemetry-kuttl diff --git a/tests/kuttl/suites/cloudkitty/deps/telemetry.yaml b/tests/kuttl/suites/cloudkitty/deps/telemetry.yaml new file mode 100644 index 00000000..5c2714a2 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/deps/telemetry.yaml @@ -0,0 +1,7 @@ +apiVersion: core.openstack.org/v1beta1 +kind: OpenStackControlPlane +metadata: + name: openstack +spec: + telemetry: + enabled: false diff --git a/tests/kuttl/suites/cloudkitty/output/.keep b/tests/kuttl/suites/cloudkitty/output/.keep new file mode 100644 index 00000000..e69de29b diff --git a/tests/kuttl/suites/cloudkitty/tests/00-deps.yaml b/tests/kuttl/suites/cloudkitty/tests/00-deps.yaml new file mode 100644 index 00000000..f850b416 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/tests/00-deps.yaml @@ -0,0 +1,9 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + oc apply -f ../deps/loki-operator.yaml + until oc api-resources | grep -q grafana; do sleep 1; done + - script: | + oc apply -f ../deps/minio.yaml + oc wait --for='jsonpath={.status.conditions[?(@.type=="Ready")].status}=True' pod/minio -n minio-dev diff --git a/tests/kuttl/suites/cloudkitty/tests/01-assert.yaml b/tests/kuttl/suites/cloudkitty/tests/01-assert.yaml new file mode 100644 index 00000000..a9e56416 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/tests/01-assert.yaml @@ -0,0 +1,197 @@ +apiVersion: loki.grafana.com/v1 +kind: LokiStack +metadata: + name: telemetry-kuttl-cloudkitty-lokistack + ownerReferences: + - kind: CloudKitty + name: telemetry-kuttl-cloudkitty +spec: + tenants: + authentication: + - tenantId: telemetry-kuttl-cloudkitty + tenantName: telemetry-kuttl-cloudkitty +# NOTE: It's hard to assert LokiStack condition with kuttl-tests, because +# their number can change even when LokiStack as whole is actually ready. +# And because kuttl-tests will compare the number of conditions defined +# here vs. the number of conditions inside the real CR, it could fail +# even when it shouldn't. 
+# But LokiStack condition is being copied into the master CloudKitty CR, +# so we can ensure that LokiStack is ready by checking that Cloudkitty +# is Ready +--- +apiVersion: telemetry.openstack.org/v1beta1 +kind: CloudKitty +metadata: + name: telemetry-kuttl-cloudkitty +status: + conditions: + - status: "True" + type: Ready + - status: "True" + type: CloudKittyAPIReady + - status: "True" + type: CloudKittyClientCertReady + - status: "True" + type: CloudKittyLokiStackReady + - status: "True" + type: CloudKittyProcReady + - status: "True" + type: CloudKittyStorageInitReady + - status: "True" + type: DBReady + - status: "True" + type: DBSyncReady + - status: "True" + type: InputReady + - status: "True" + type: MariaDBAccountReady + - status: "True" + type: MemcachedReady + - status: "True" + type: NetworkAttachmentsReady + - status: "True" + type: RabbitMqTransportURLReady + - status: "True" + type: RoleBindingReady + - status: "True" + type: RoleReady + - status: "True" + type: ServiceAccountReady + - status: "True" + type: ServiceConfigReady +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: cert-cloudkitty-client-internal + ownerReferences: + - kind: CloudKitty + name: telemetry-kuttl-cloudkitty +spec: + subject: + organizationalUnits: + - telemetry-kuttl-cloudkitty + usages: + - digital signature + - key encipherment + - client auth +status: + conditions: + - status: "True" + type: Ready +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + component: cloudkitty-api + service: cloudkitty + name: telemetry-kuttl-cloudkitty-api + ownerReferences: + - kind: CloudKittyAPI + name: telemetry-kuttl-cloudkitty-api +spec: + replicas: 1 + selector: + matchLabels: + component: cloudkitty-api + service: cloudkitty +status: + availableReplicas: 1 + currentReplicas: 1 + readyReplicas: 1 + replicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + component: cloudkitty-api + service: cloudkitty + name: telemetry-kuttl-cloudkitty-api-0 + ownerReferences: + - kind: StatefulSet + name: telemetry-kuttl-cloudkitty-api +spec: + containers: + - name: cloudkitty-api + hostname: telemetry-kuttl-cloudkitty-api-0 +status: + containerStatuses: + - name: cloudkitty-api + ready: true + started: true +--- +apiVersion: v1 +kind: Service +metadata: + labels: + component: cloudkitty-api + service: cloudkitty + name: cloudkitty-internal + ownerReferences: + - kind: CloudKittyAPI + name: telemetry-kuttl-cloudkitty-api +spec: + ports: + - port: 8889 + protocol: TCP + targetPort: 8889 +--- +apiVersion: v1 +kind: Service +metadata: + labels: + component: cloudkitty-api + service: cloudkitty + name: cloudkitty-public + ownerReferences: + - kind: CloudKittyAPI + name: telemetry-kuttl-cloudkitty-api +spec: + ports: + - port: 8889 + protocol: TCP + targetPort: 8889 +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + component: cloudkitty-proc + service: cloudkitty + name: telemetry-kuttl-cloudkitty-proc + ownerReferences: + - kind: CloudKittyProc + name: telemetry-kuttl-cloudkitty-proc +spec: + replicas: 1 + selector: + matchLabels: + component: cloudkitty-proc + service: cloudkitty +status: + availableReplicas: 1 + currentReplicas: 1 + readyReplicas: 1 + replicas: 1 +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + component: cloudkitty-proc + service: cloudkitty + name: telemetry-kuttl-cloudkitty-proc-0 + ownerReferences: + - kind: StatefulSet + name: telemetry-kuttl-cloudkitty-proc +spec: + containers: + - name: cloudkitty-processor + hostname: 
telemetry-kuttl-cloudkitty-proc-0 +status: + containerStatuses: + - name: cloudkitty-processor + ready: true + started: true diff --git a/tests/kuttl/suites/cloudkitty/tests/01-deploy.yaml b/tests/kuttl/suites/cloudkitty/tests/01-deploy.yaml new file mode 100644 index 00000000..b9c06e71 --- /dev/null +++ b/tests/kuttl/suites/cloudkitty/tests/01-deploy.yaml @@ -0,0 +1,51 @@ +apiVersion: telemetry.openstack.org/v1beta1 +kind: CloudKitty +metadata: + name: telemetry-kuttl-cloudkitty +spec: + apiTimeout: 0 + cloudKittyAPI: + override: + service: + internal: + metadata: + labels: + osctlplane: "" + osctlplane-service: telemetry + public: + metadata: + labels: + osctlplane: "" + osctlplane-service: telemetry + replicas: 1 + resources: {} + tls: + api: + internal: {} + public: {} + caBundleSecretName: combined-ca-bundle + cloudKittyProc: + replicas: 1 + resources: {} + tls: + caBundleSecretName: combined-ca-bundle + databaseAccount: cloudkitty + databaseInstance: openstack + memcachedInstance: memcached + passwordSelector: + aodhService: AodhPassword + ceilometerService: CeilometerPassword + cloudKittyService: CloudKittyPassword + period: 300 + preserveJobs: false + rabbitMqClusterName: rabbitmq + s3StorageConfig: + schemas: + - effectiveDate: "2024-11-18" + version: v13 + secret: + name: logging-loki-s3 + type: s3 + secret: osp-secret + serviceUser: cloudkitty + storageClass: local-storage diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml index 1a85a567..637ff3d7 100644 --- a/zuul.d/projects.yaml +++ b/zuul.d/projects.yaml @@ -104,6 +104,28 @@ - "@{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/vars-power-monitoring.yml" irrelevant-files: *irrelevant_files +- job: + name: telemetry-operator-multinode-cloudkitty + dependencies: ["telemetry-openstack-meta-content-provider-master"] + parent: telemetry-operator-multinode-autoscaling + description: | + Deploy CloudKitty and run tempest tests + required-projects: + - name: infrawatch/feature-verification-tests + override-checkout: master + extra-vars: *mcp_extra_vars + vars: + #patch_observabilityclient: true + cifmw_update_containers: false + cifmw_extras: + - "@{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/ci-framework'].src_dir }}/scenarios/centos-9/multinode-ci.yml" + # Need a config for CK + - "@{{ ansible_user_dir }}/{{ zuul.projects['github.com/openstack-k8s-operators/telemetry-operator'].src_dir }}/ci/vars-cloudkitty-tempest.yml" + - "@{{ ansible_user_dir }}/{{ zuul.projects['github.com/infrawatch/feature-verification-tests'].src_dir }}/ci/vars-use-master-containers.yml" + roles: + - zuul: github.com/openstack-k8s-operators/ci-framework + irrelevant-files: *irrelevant_files + - project-template: name: rdo-telemetry-tempest-plugin-jobs openstack-experimental: @@ -133,25 +155,30 @@ - project: name: openstack-k8s-operators/telemetry-operator - templates: - - podified-multinode-edpm-pipeline + #templates: + # - podified-multinode-edpm-pipeline github-check: + #debug: true jobs: + - telemetry-operator-multinode-cloudkitty: + dependencies: + - telemetry-openstack-meta-content-provider-master - telemetry-openstack-meta-content-provider-master - - telemetry-operator-multinode-default-telemetry - - functional-tests-osp18: &fvt_jobs_config - voting: true - required-projects: - - name: infrawatch/feature-verification-tests - override-checkout: master - irrelevant-files: *irrelevant_files - - feature-verification-tests-noop: - files: *irrelevant_files - - 
functional-periodic-telemetry-with-ceph: - required-projects: - - name: infrawatch/feature-verification-tests - override-checkout: master - files: - - ci/deploy-telemetry-with-ceph.yml - - ci/vars-telemetry-with-ceph.yml - - zuul.d/projects.yaml + #- telemetry-operator-multinode-default-telemetry + #- functional-tests-osp18: &fvt_jobs_config + # voting: true + # required-projects: + # - name: infrawatch/feature-verification-tests + # override-checkout: master + # irrelevant-files: *irrelevant_files + #- feature-verification-tests-noop: + # files: *irrelevant_files + #- functional-periodic-telemetry-with-ceph: + # required-projects: + # - name: infrawatch/feature-verification-tests + # override-checkout: master + # files: + # - ci/deploy-telemetry-with-ceph.yml + # - ci/vars-telemetry-with-ceph.yml + # - zuul.d/projects.yaml +