diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml index 8e4034b..eb171a0 100644 --- a/.github/workflows/integration_test.yaml +++ b/.github/workflows/integration_test.yaml @@ -7,17 +7,11 @@ jobs: integration-tests: uses: canonical/operator-workflows/.github/workflows/integration_test.yaml@main secrets: inherit + permissions: + contents: read + packages: write with: - chaos-app-label: app.kubernetes.io/name=indico - chaos-enabled: false - chaos-experiments: pod-delete - load-test-enabled: false - load-test-run-args: "-e LOAD_TEST_HOST=localhost" - zap-before-command: "curl -H \"Host: indico.local\" http://localhost/bootstrap --data-raw 'csrf_token=00000000-0000-0000-0000-000000000000&first_name=admin&last_name=admin&email=admin%40admin.com&username=admin&password=lunarlobster&confirm_password=lunarlobster&affiliation=Canonical'" - zap-enabled: true - zap-cmd-options: '-T 60 -z "-addoninstall jython" --hook "/zap/wrk/tests/zap/hook.py"' - zap-target: localhost - zap-target-port: 80 - zap-rules-file-name: "zap_rules.tsv" - trivy-fs-enabled: true - trivy-image-config: "trivy.yaml" + pre-run-script: | + -c "sudo microk8s config > ${GITHUB_WORKSPACE}/kube-config + chmod +x tests/integration/pre_run_script.sh + ./tests/integration/pre_run_script.sh" diff --git a/.gitignore b/.gitignore index 8461f41..4d88396 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,8 @@ __pycache__/ .mypy_cache *.egg-info/ */*.rock +*/*.snap +# charmcraft-related directories +prime/ +parts/ +stage/ diff --git a/.licenserc.yaml b/.licenserc.yaml new file mode 100644 index 0000000..4e10e32 --- /dev/null +++ b/.licenserc.yaml @@ -0,0 +1,36 @@ +header: + license: + spdx-id: Apache-2.0 + copyright-owner: Canonical Ltd. + content: | + Copyright [year] [owner] + See LICENSE file for licensing details. 
+ paths: + - '**' + paths-ignore: + - '.github/**' + - '**/.gitkeep' + - '**/*.cfg' + - '**/*.conf' + - '**/*.j2' + - '**/*.json' + - '**/*.md' + - '**/*.rule' + - '**/*.tmpl' + - '**/*.txt' + - '.codespellignore' + - '.dockerignore' + - '.flake8' + - '.jujuignore' + - '.gitignore' + - '.licenserc.yaml' + - '.trivyignore' + - '.woke.yaml' + - '.woke.yml' + - 'CODEOWNERS' + - 'icon.svg' + - 'LICENSE' + - 'trivy.yaml' + - 'zap_rules.tsv' + - 'lib/**' + comment: on-failure diff --git a/.woke.yaml b/.woke.yaml new file mode 100644 index 0000000..bdd1fed --- /dev/null +++ b/.woke.yaml @@ -0,0 +1,10 @@ +ignore_files: + - lib/* + +rules: + # [2022.11.28] Ignore "slave"/"master" - The relation with Jenkins charm is using this + # terminology due to support for older versions of Jenkins. + - name: slave + - name: master + # Ignore whitelist - we are using it to ignore pydantic in pyptoject.toml + - name: whitelist diff --git a/charmcraft.yaml b/charmcraft.yaml index df6fdcb..6a61d65 100644 --- a/charmcraft.yaml +++ b/charmcraft.yaml @@ -6,8 +6,10 @@ type: charm bases: - build-on: - - name: ubuntu - channel: "22.04" + - name: ubuntu + channel: "22.04" run-on: - - name: ubuntu - channel: "22.04" + - name: ubuntu + channel: "20.04" + - name: ubuntu + channel: "22.04" diff --git a/config.yaml b/config.yaml index dd4dcdd..4e3bf91 100644 --- a/config.yaml +++ b/config.yaml @@ -1,16 +1,9 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. -# This file defines charm config options, and populates the Configure tab on Charmhub. -# If your charm does not require configuration options, delete this file entirely. -# -# See https://juju.is/docs/config for guidance. - options: - # An example config option to customise the log level of the workload - log-level: - description: | - Configures the log level of gunicorn. 
- - Acceptable values are: "info", "debug", "warning", "error" and "critical" - default: "info" + jenkins_agent_labels: type: string + default: "" + description: | + Comma-separated list of labels to be assigned to the agent in Jenkins. If not set it will + default to the agents hardware identifier, e.g.: 'x86_64' diff --git a/docs/explanation/workload.md b/docs/explanation/workload.md new file mode 100644 index 0000000..01c880e --- /dev/null +++ b/docs/explanation/workload.md @@ -0,0 +1,22 @@ +# Managing workload inside the charm +The core jenkins agent workload requires 3 main parameters (JENKINS_URL, JENKINS_AGENT and JENKINS_SECRET) and is defined as a 2-step process: + +1. Download the agent binary at JENKINS_URL/jnlpJars/agent.jar and store it in the agent’s home directory +2. Run the agent binary with the following parameters to register the node with Jenkins +``` +/usr/bin/java -jar agent.jar \\ +-jnlpUrl "" \\ +-workDir "${JENKINS_WORKDIR}" \\ +-noReconnect \\ +-secret "${JENKINS_SECRET}" +``` +In the charm, this workload is managed using an [apt package](https://launchpad.net/~canonical-is-devops/+archive/ubuntu/jenkins-agent-charm) which installs a systemd service that can be configured via a configuration file. +``` +# File: /etc/systemd/system/jenkins-agent.service.d/override.conf +[Service] +Environment="JENKINS_SECRET=secret" +Environment="JENKINS_URL=url" +Environment="JENKINS_AGENT=node-name" +``` + +The service won’t start automatically through the use of the `--no-start` option during packaging in order to allow flexibility between running the workload as a service and as a standalone executable, located at `/usr/bin/jenkins-agent`. 
\ No newline at end of file diff --git a/docs/how-to/configure-agent-node-label.md b/docs/how-to/configure-agent-node-label.md new file mode 100644 index 0000000..994740d --- /dev/null +++ b/docs/how-to/configure-agent-node-label.md @@ -0,0 +1,10 @@ +# How to configure agent node labels + +### Configure `jenkins_agent_labels` + +Use the `jenkins_agent_labels` configuration to allow assigning different labels to the agent's node on the Jenkins server. +Comma-separated list of node labels. If empty, the agent's node will have the underlying machine's arch as a label by default, most of the time this will be `x86_64`. If this value is configured before any integrations with the Jenkins charm are established, the label will be applied during the node's creation once an integration has been established. + +``` +juju config jenkins-agent jenkins_agent_labels=label1,label2,label3 +``` diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..d643dce --- /dev/null +++ b/docs/index.md @@ -0,0 +1,32 @@ +# Jenkins Agent Operator + +A [Juju](https://juju.is/) [charm](https://juju.is/docs/olm/charmed-operators) deploying and managing [Jenkins](https://www.jenkins.io/) Agent on machines and configurable to use a Jenkins charm deployed in another Juju model. + +This charm simplifies initial deployment and "day N" operations of Jenkins Agent on VMs and bare metal. + +As such, the charm makes it easy for those looking to take control of their own agents whilst keeping operations simple, and gives them the freedom to deploy on the platform of their choice. + +For DevOps or SRE teams this charm will make operating Jenkins Agent simple and straightforward through Juju's clean interface. It will allow easy deployment into multiple environments for testing changes, and supports scaling out for enterprise deployments. + +## Project and community + +The Jenkins-agent Operator is a member of the Ubuntu family. 
It's an open source project that warmly welcomes community projects, contributions, suggestions, fixes and constructive feedback. + +- [Code of conduct](https://ubuntu.com/community/code-of-conduct) +- [Get support](https://discourse.charmhub.io/) +- [Join our online chat](https://app.element.io/#/room/#charmhub-charmdev:ubuntu.com) +- [Contribute](Contribute) + +Thinking about using the Jenkins Agent Operator for your next project? [Get in touch](https://app.element.io/#/room/#charmhub-charmdev:ubuntu.com)! + +# Contents + +1. [Tutorial](tutorial) + 1. [Getting Started](tutorial/getting-started.md) +1. [How to](how-to) + 1. [Configure agent node label](how-to/configure-agent-node-label.md) +1. [Reference](reference) + 1. [Actions](reference/actions.md) + 1. [Configurations](reference/configurations.md) + 1. [Integrations](reference/integrations.md) +1. [Explanation](explanation) diff --git a/docs/reference/actions.md b/docs/reference/actions.md new file mode 100644 index 0000000..8185743 --- /dev/null +++ b/docs/reference/actions.md @@ -0,0 +1,3 @@ +# Actions + +See [Actions](https://charmhub.io/jenkins-agent/actions). diff --git a/docs/reference/configurations.md b/docs/reference/configurations.md new file mode 100644 index 0000000..ccf3dde --- /dev/null +++ b/docs/reference/configurations.md @@ -0,0 +1,3 @@ +# Configurations + +See [Configure](https://charmhub.io/jenkins-agent/configure). diff --git a/docs/reference/integrations.md b/docs/reference/integrations.md new file mode 100644 index 0000000..216216c --- /dev/null +++ b/docs/reference/integrations.md @@ -0,0 +1,21 @@ +# Integrations + +### agent + +_Interface_: jenkins_agent_v0 +_Supported charms_: [jenkins-k8s](https://charmhub.io/jenkins-k8s) + +Jenkins agents provide a way to perform tasks scheduled by the Jenkins server. Jenkins agents are +used to distribute workload across multiple machines, allowing parallel execution of jobs. 
+ +To create a [cross model integration](https://juju.is/docs/olm/manage-cross-model-integrations) with +a jenkins-agent (VM) charm, create an offer from the machine model. + +`juju offer jenkins-agent:agent` + +Then, integrate the offer from the k8s model where jenkins-k8s charm resides. + +`juju integrate jenkins-k8s:agent <controller>:<user>/<model>.jenkins-agent` + +An example of such a command would look like the following, using a jenkins-k8s charm deployed on microk8s. +`juju integrate jenkins-k8s:agent localhost:admin/jenkins-agent-model.jenkins-agent` diff --git a/docs/tutorial/getting-started.md b/docs/tutorial/getting-started.md new file mode 100644 index 0000000..edf606e --- /dev/null +++ b/docs/tutorial/getting-started.md @@ -0,0 +1,100 @@ +# Getting Started + +## What you'll do + +- Deploy the [jenkins-agent charm](https://charmhub.io/jenkins-agent) +- Deploy the [jenkins-k8s charm](https://charmhub.io/jenkins-k8s) and integrate with it via a cross-model integration + +The `jenkins-agent` charm helps deploy a Jenkins agent with ease and also helps operate the charm. This +tutorial will walk you through each step of deployment to get a basic Jenkins agent deployment and integrate it with Jenkins. + +### Prerequisites + +To deploy the `jenkins-agent` charm, you'll need to have a bootstrapped machine model. Learn about +bootstrapping different clouds [here](https://juju.is/docs/olm/get-started-with-juju#heading--prepare-your-cloud). + +Use `juju bootstrap localhost localhost` to bootstrap a `lxd` machine controller with the name +`localhost` for tutorial purposes. + +### Setting up the tutorial model + +To easily clean up the resources and to separate your workload from the contents of this tutorial, +it is recommended to set up a new model with the following command. + +``` +juju add-model tutorial +``` + +### Deploy the jenkins-agent charm + +Start off by deploying the jenkins-agent charm. By default it will deploy the latest stable release +of the jenkins-agent charm. 
+ +``` +# Deploy an edge version of the charm until stable version is released. +juju deploy jenkins-agent --channel=latest/edge +``` + +### Deploy and integrate with the jenkins-k8s charm + +To deploy the jenkins-k8s charm, you will need a Juju controller bootstrapped on a Kubernetes cloud. +To see how to bootstrap your juju installation with microk8s, please refer to the documentation +on microk8s [installation](https://juju.is/docs/olm/microk8s). + +Use `juju bootstrap microk8s localhost-microk8s` to bootstrap a `microk8s` Kubernetes controller with the name +`localhost-microk8s` for tutorial purposes. + +Then, switch to your kubernetes controller and add a model for the jenkins-k8s charm with the following command: +``` +juju switch -c localhost-microk8s +juju add-model jenkins-tutorial +``` + +Continue by deploying the jenkins-k8s charm. By default it will deploy the latest stable release of the jenkins-k8s charm: +``` +juju deploy jenkins-k8s --channel=latest/edge +``` + +The Jenkins application can only have a single server unit. Adding more units through the --num-units parameter will cause the application to misbehave. + +#### Create an offer for Cross Model Integration + +To integrate charms +[across different models](https://juju.is/docs/juju/manage-cross-model-integrations), a juju +[`offer`](https://juju.is/docs/juju/manage-cross-model-integrations#heading--create-an-offer) is +required. + +Create an offer of the `jenkins-k8s` charm's `agent` integration. + +``` +juju offer jenkins-k8s:agent +``` + +The output should look similar to the contents below: + +``` +Application "jenkins-k8s" endpoints [agent] available at "admin/jenkins-tutorial.jenkins-k8s" +``` + +#### Integrate the Jenkins agent charm through the offer + +Switch back to the machine model where the `jenkins-agent` charm is deployed. An example of the switch +command looks like the following: `juju switch localhost:tutorial`. 
+ +Integrate the `jenkins-agent` charm to the `jenkins-k8s` server charm through the offer. +The syntax of the offer is as follows: `<controller>:<user>/<model>.<offer-name>`. + +``` +juju integrate jenkins-agent:agent localhost-microk8s:admin/jenkins-tutorial.jenkins-k8s +``` + + +### Cleaning up the environment + +Congratulations! You have successfully finished the tutorial. You can now remove the +models that you’ve created using the following command. + +``` +juju destroy-model localhost-microk8s:admin/jenkins-tutorial -y --release-storage +juju destroy-model localhost:admin/tutorial -y --release-storage +``` diff --git a/lib/charms/operator_libs_linux/v0/apt.py b/lib/charms/operator_libs_linux/v0/apt.py new file mode 100644 index 0000000..7afb183 --- /dev/null +++ b/lib/charms/operator_libs_linux/v0/apt.py @@ -0,0 +1,1361 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Abstractions for the system's Debian/Ubuntu package information and repositories. + +This module contains abstractions and wrappers around Debian/Ubuntu-style repositories and +packages, in order to easily provide an idiomatic and Pythonic mechanism for adding packages and/or +repositories to systems for use in machine charms. + +A sane default configuration is attainable through nothing more than instantiation of the +appropriate classes. `DebianPackage` objects provide information about the architecture, version, +name, and status of a package. 
+ +`DebianPackage` will try to look up a package either from `dpkg -L` or from `apt-cache` when +provided with a string indicating the package name. If it cannot be located, `PackageNotFoundError` +will be returned, as `apt` and `dpkg` otherwise return `100` for all errors, and a meaningful error +message if the package is not known is desirable. + +To install packages with convenience methods: + +```python +try: + # Run `apt-get update` + apt.update() + apt.add_package("zsh") + apt.add_package(["vim", "htop", "wget"]) +except PackageNotFoundError: + logger.error("a specified package not found in package cache or on system") +except PackageError as e: + logger.error("could not install package. Reason: %s", e.message) +```` + +To find details of a specific package: + +```python +try: + vim = apt.DebianPackage.from_system("vim") + + # To find from the apt cache only + # apt.DebianPackage.from_apt_cache("vim") + + # To find from installed packages only + # apt.DebianPackage.from_installed_package("vim") + + vim.ensure(PackageState.Latest) + logger.info("updated vim to version: %s", vim.fullversion) +except PackageNotFoundError: + logger.error("a specified package not found in package cache or on system") +except PackageError as e: + logger.error("could not install package. Reason: %s", e.message) +``` + + +`RepositoryMapping` will return a dict-like object containing enabled system repositories +and their properties (available groups, baseuri. gpg key). This class can add, disable, or +manipulate repositories. Items can be retrieved as `DebianRepository` objects. + +In order add a new repository with explicit details for fields, a new `DebianRepository` can +be added to `RepositoryMapping` + +`RepositoryMapping` provides an abstraction around the existing repositories on the system, +and can be accessed and iterated over like any `Mapping` object, to retrieve values by key, +iterate, or perform other operations. 
+ +Keys are constructed as `{repo_type}-{}-{release}` in order to uniquely identify a repository. + +Repositories can be added with explicit values through a Python constructor. + +Example: +```python +repositories = apt.RepositoryMapping() + +if "deb-example.com-focal" not in repositories: + repositories.add(DebianRepository(enabled=True, repotype="deb", + uri="https://example.com", release="focal", groups=["universe"])) +``` + +Alternatively, any valid `sources.list` line may be used to construct a new +`DebianRepository`. + +Example: +```python +repositories = apt.RepositoryMapping() + +if "deb-us.archive.ubuntu.com-xenial" not in repositories: + line = "deb http://us.archive.ubuntu.com/ubuntu xenial main restricted" + repo = DebianRepository.from_repo_line(line) + repositories.add(repo) +``` +""" + +import fileinput +import glob +import logging +import os +import re +import subprocess +from collections.abc import Mapping +from enum import Enum +from subprocess import PIPE, CalledProcessError, check_call, check_output +from typing import Iterable, List, Optional, Tuple, Union +from urllib.parse import urlparse + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "7c3dbc9c2ad44a47bd6fcb25caa270e5" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 11 + + +VALID_SOURCE_TYPES = ("deb", "deb-src") +OPTIONS_MATCHER = re.compile(r"\[.*?\]") + + +class Error(Exception): + """Base class of most errors raised by this library.""" + + def __repr__(self): + """Represent the Error.""" + return "<{}.{} {}>".format(type(self).__module__, type(self).__name__, self.args) + + @property + def name(self): + """Return a string representation of the model plus class.""" + return "<{}.{}>".format(type(self).__module__, type(self).__name__) + + @property + 
def message(self): + """Return the message passed as an argument.""" + return self.args[0] + + +class PackageError(Error): + """Raised when there's an error installing or removing a package.""" + + +class PackageNotFoundError(Error): + """Raised when a requested package is not known to the system.""" + + +class PackageState(Enum): + """A class to represent possible package states.""" + + Present = "present" + Absent = "absent" + Latest = "latest" + Available = "available" + + +class DebianPackage: + """Represents a traditional Debian package and its utility functions. + + `DebianPackage` wraps information and functionality around a known package, whether installed + or available. The version, epoch, name, and architecture can be easily queried and compared + against other `DebianPackage` objects to determine the latest version or to install a specific + version. + + The representation of this object as a string mimics the output from `dpkg` for familiarity. + + Installation and removal of packages is handled through the `state` property or `ensure` + method, with the following options: + + apt.PackageState.Absent + apt.PackageState.Available + apt.PackageState.Present + apt.PackageState.Latest + + When `DebianPackage` is initialized, the state of a given `DebianPackage` object will be set to + `Available`, `Present`, or `Latest`, with `Absent` implemented as a convenience for removal + (though it operates essentially the same as `Available`). + """ + + def __init__( + self, name: str, version: str, epoch: str, arch: str, state: PackageState + ) -> None: + self._name = name + self._arch = arch + self._state = state + self._version = Version(version, epoch) + + def __eq__(self, other) -> bool: + """Equality for comparison. 
+ + Args: + other: a `DebianPackage` object for comparison + + Returns: + A boolean reflecting equality + """ + return isinstance(other, self.__class__) and ( + self._name, + self._version.number, + ) == (other._name, other._version.number) + + def __hash__(self): + """Return a hash of this package.""" + return hash((self._name, self._version.number)) + + def __repr__(self): + """Represent the package.""" + return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__) + + def __str__(self): + """Return a human-readable representation of the package.""" + return "<{}: {}-{}.{} -- {}>".format( + self.__class__.__name__, + self._name, + self._version, + self._arch, + str(self._state), + ) + + @staticmethod + def _apt( + command: str, + package_names: Union[str, List], + optargs: Optional[List[str]] = None, + ) -> None: + """Wrap package management commands for Debian/Ubuntu systems. + + Args: + command: the command given to `apt-get` + package_names: a package name or list of package names to operate on + optargs: an (Optional) list of additioanl arguments + + Raises: + PackageError if an error is encountered + """ + optargs = optargs if optargs is not None else [] + if isinstance(package_names, str): + package_names = [package_names] + _cmd = ["apt-get", "-y", *optargs, command, *package_names] + try: + env = os.environ.copy() + env["DEBIAN_FRONTEND"] = "noninteractive" + check_call(_cmd, env=env, stderr=PIPE, stdout=PIPE) + except CalledProcessError as e: + raise PackageError( + "Could not {} package(s) [{}]: {}".format(command, [*package_names], e.output) + ) from None + + def _add(self) -> None: + """Add a package to the system.""" + self._apt( + "install", + "{}={}".format(self.name, self.version), + optargs=["--option=Dpkg::Options::=--force-confold"], + ) + + def _remove(self) -> None: + """Remove a package from the system. 
Implementation-specific.""" + return self._apt("remove", "{}={}".format(self.name, self.version)) + + @property + def name(self) -> str: + """Returns the name of the package.""" + return self._name + + def ensure(self, state: PackageState): + """Ensure that a package is in a given state. + + Args: + state: a `PackageState` to reconcile the package to + + Raises: + PackageError from the underlying call to apt + """ + if self._state is not state: + if state not in (PackageState.Present, PackageState.Latest): + self._remove() + else: + self._add() + self._state = state + + @property + def present(self) -> bool: + """Returns whether or not a package is present.""" + return self._state in (PackageState.Present, PackageState.Latest) + + @property + def latest(self) -> bool: + """Returns whether the package is the most recent version.""" + return self._state is PackageState.Latest + + @property + def state(self) -> PackageState: + """Returns the current package state.""" + return self._state + + @state.setter + def state(self, state: PackageState) -> None: + """Set the package state to a given value. + + Args: + state: a `PackageState` to reconcile the package to + + Raises: + PackageError from the underlying call to apt + """ + if state in (PackageState.Latest, PackageState.Present): + self._add() + else: + self._remove() + self._state = state + + @property + def version(self) -> "Version": + """Returns the version for a package.""" + return self._version + + @property + def epoch(self) -> str: + """Returns the epoch for a package. 
May be unset.""" + return self._version.epoch + + @property + def arch(self) -> str: + """Returns the architecture for a package.""" + return self._arch + + @property + def fullversion(self) -> str: + """Returns the name+epoch for a package.""" + return "{}.{}".format(self._version, self._arch) + + @staticmethod + def _get_epoch_from_version(version: str) -> Tuple[str, str]: + """Pull the epoch, if any, out of a version string.""" + epoch_matcher = re.compile(r"^((?P\d+):)?(?P.*)") + matches = epoch_matcher.search(version).groupdict() + return matches.get("epoch", ""), matches.get("version") + + @classmethod + def from_system( + cls, package: str, version: Optional[str] = "", arch: Optional[str] = "" + ) -> "DebianPackage": + """Locates a package, either on the system or known to apt, and serializes the information. + + Args: + package: a string representing the package + version: an optional string if a specific version is requested + arch: an optional architecture, defaulting to `dpkg --print-architecture`. If an + architecture is not specified, this will be used for selection. + + """ + try: + return DebianPackage.from_installed_package(package, version, arch) + except PackageNotFoundError: + logger.debug( + "package '%s' is not currently installed or has the wrong architecture.", package + ) + + # Ok, try `apt-cache ...` + try: + return DebianPackage.from_apt_cache(package, version, arch) + except (PackageNotFoundError, PackageError): + # If we get here, it's not known to the systems. + # This seems unnecessary, but virtually all `apt` commands have a return code of `100`, + # and providing meaningful error messages without this is ugly. 
+ raise PackageNotFoundError( + "Package '{}{}' could not be found on the system or in the apt cache!".format( + package, ".{}".format(arch) if arch else "" + ) + ) from None + + @classmethod + def from_installed_package( + cls, package: str, version: Optional[str] = "", arch: Optional[str] = "" + ) -> "DebianPackage": + """Check whether the package is already installed and return an instance. + + Args: + package: a string representing the package + version: an optional string if a specific version is requested + arch: an optional architecture, defaulting to `dpkg --print-architecture`. + If an architecture is not specified, this will be used for selection. + """ + system_arch = check_output( + ["dpkg", "--print-architecture"], universal_newlines=True + ).strip() + arch = arch if arch else system_arch + + # Regexps are a really terrible way to do this. Thanks dpkg + output = "" + try: + output = check_output(["dpkg", "-l", package], stderr=PIPE, universal_newlines=True) + except CalledProcessError: + raise PackageNotFoundError("Package is not installed: {}".format(package)) from None + + # Pop off the output from `dpkg -l' because there's no flag to + # omit it` + lines = str(output).splitlines()[5:] + + dpkg_matcher = re.compile( + r""" + ^(?P\w+?)\s+ + (?P.*?)(?P:\w+?)?\s+ + (?P.*?)\s+ + (?P\w+?)\s+ + (?P.*) + """, + re.VERBOSE, + ) + + for line in lines: + try: + matches = dpkg_matcher.search(line).groupdict() + package_status = matches["package_status"] + + if not package_status.endswith("i"): + logger.debug( + "package '%s' in dpkg output but not installed, status: '%s'", + package, + package_status, + ) + break + + epoch, split_version = DebianPackage._get_epoch_from_version(matches["version"]) + pkg = DebianPackage( + matches["package_name"], + split_version, + epoch, + matches["arch"], + PackageState.Present, + ) + if (pkg.arch == "all" or pkg.arch == arch) and ( + version == "" or str(pkg.version) == version + ): + return pkg + except AttributeError: + 
logger.warning("dpkg matcher could not parse line: %s", line) + + # If we didn't find it, fail through + raise PackageNotFoundError("Package {}.{} is not installed!".format(package, arch)) + + @classmethod + def from_apt_cache( + cls, package: str, version: Optional[str] = "", arch: Optional[str] = "" + ) -> "DebianPackage": + """Check whether the package is already installed and return an instance. + + Args: + package: a string representing the package + version: an optional string if a specific version is requested + arch: an optional architecture, defaulting to `dpkg --print-architecture`. + If an architecture is not specified, this will be used for selection. + """ + system_arch = check_output( + ["dpkg", "--print-architecture"], universal_newlines=True + ).strip() + arch = arch if arch else system_arch + + # Regexps are a really terrible way to do this. Thanks dpkg + keys = ("Package", "Architecture", "Version") + + try: + output = check_output( + ["apt-cache", "show", package], stderr=PIPE, universal_newlines=True + ) + except CalledProcessError as e: + raise PackageError( + "Could not list packages in apt-cache: {}".format(e.output) + ) from None + + pkg_groups = output.strip().split("\n\n") + keys = ("Package", "Architecture", "Version") + + for pkg_raw in pkg_groups: + lines = str(pkg_raw).splitlines() + vals = {} + for line in lines: + if line.startswith(keys): + items = line.split(":", 1) + vals[items[0]] = items[1].strip() + else: + continue + + epoch, split_version = DebianPackage._get_epoch_from_version(vals["Version"]) + pkg = DebianPackage( + vals["Package"], + split_version, + epoch, + vals["Architecture"], + PackageState.Available, + ) + + if (pkg.arch == "all" or pkg.arch == arch) and ( + version == "" or str(pkg.version) == version + ): + return pkg + + # If we didn't find it, fail through + raise PackageNotFoundError("Package {}.{} is not in the apt cache!".format(package, arch)) + + +class Version: + """An abstraction around package versions. 
+ + This seems like it should be strictly unnecessary, except that `apt_pkg` is not usable inside a + venv, and wedging version comparisons into `DebianPackage` would overcomplicate it. + + This class implements the algorithm found here: + https://www.debian.org/doc/debian-policy/ch-controlfields.html#version + """ + + def __init__(self, version: str, epoch: str): + self._version = version + self._epoch = epoch or "" + + def __repr__(self): + """Represent the package.""" + return "<{}.{}: {}>".format(self.__module__, self.__class__.__name__, self.__dict__) + + def __str__(self): + """Return human-readable representation of the package.""" + return "{}{}".format("{}:".format(self._epoch) if self._epoch else "", self._version) + + @property + def epoch(self): + """Returns the epoch for a package. May be empty.""" + return self._epoch + + @property + def number(self) -> str: + """Returns the version number for a package.""" + return self._version + + def _get_parts(self, version: str) -> Tuple[str, str]: + """Separate the version into component upstream and Debian pieces.""" + try: + version.rindex("-") + except ValueError: + # No hyphens means no Debian version + return version, "0" + + upstream, debian = version.rsplit("-", 1) + return upstream, debian + + def _listify(self, revision: str) -> List[str]: + """Split a revision string into a listself. + + This list is comprised of alternating between strings and numbers, + padded on either end to always be "str, int, str, int..." and + always be of even length. This allows us to trivially implement the + comparison algorithm described. 
+ """ + result = [] + while revision: + rev_1, remains = self._get_alphas(revision) + rev_2, remains = self._get_digits(remains) + result.extend([rev_1, rev_2]) + revision = remains + return result + + def _get_alphas(self, revision: str) -> Tuple[str, str]: + """Return a tuple of the first non-digit characters of a revision.""" + # get the index of the first digit + for i, char in enumerate(revision): + if char.isdigit(): + if i == 0: + return "", revision + return revision[0:i], revision[i:] + # string is entirely alphas + return revision, "" + + def _get_digits(self, revision: str) -> Tuple[int, str]: + """Return a tuple of the first integer characters of a revision.""" + # If the string is empty, return (0,'') + if not revision: + return 0, "" + # get the index of the first non-digit + for i, char in enumerate(revision): + if not char.isdigit(): + if i == 0: + return 0, revision + return int(revision[0:i]), revision[i:] + # string is entirely digits + return int(revision), "" + + def _dstringcmp(self, a, b): # noqa: C901 + """Debian package version string section lexical sort algorithm. + + The lexical comparison is a comparison of ASCII values modified so + that all the letters sort earlier than all the non-letters and so that + a tilde sorts before anything, even the end of a part. 
+ """ + if a == b: + return 0 + try: + for i, char in enumerate(a): + if char == b[i]: + continue + # "a tilde sorts before anything, even the end of a part" + # (emptyness) + if char == "~": + return -1 + if b[i] == "~": + return 1 + # "all the letters sort earlier than all the non-letters" + if char.isalpha() and not b[i].isalpha(): + return -1 + if not char.isalpha() and b[i].isalpha(): + return 1 + # otherwise lexical sort + if ord(char) > ord(b[i]): + return 1 + if ord(char) < ord(b[i]): + return -1 + except IndexError: + # a is longer than b but otherwise equal, greater unless there are tildes + if char == "~": + return -1 + return 1 + # if we get here, a is shorter than b but otherwise equal, so check for tildes... + if b[len(a)] == "~": + return 1 + return -1 + + def _compare_revision_strings(self, first: str, second: str): # noqa: C901 + """Compare two debian revision strings.""" + if first == second: + return 0 + + # listify pads results so that we will always be comparing ints to ints + # and strings to strings (at least until we fall off the end of a list) + first_list = self._listify(first) + second_list = self._listify(second) + if first_list == second_list: + return 0 + try: + for i, item in enumerate(first_list): + # explicitly raise IndexError if we've fallen off the edge of list2 + if i >= len(second_list): + raise IndexError + # if the items are equal, next + if item == second_list[i]: + continue + # numeric comparison + if isinstance(item, int): + if item > second_list[i]: + return 1 + if item < second_list[i]: + return -1 + else: + # string comparison + return self._dstringcmp(item, second_list[i]) + except IndexError: + # rev1 is longer than rev2 but otherwise equal, hence greater + # ...except for goddamn tildes + if first_list[len(second_list)][0][0] == "~": + return 1 + return 1 + # rev1 is shorter than rev2 but otherwise equal, hence lesser + # ...except for goddamn tildes + if second_list[len(first_list)][0][0] == "~": + return -1 + 
return -1 + + def _compare_version(self, other) -> int: + if (self.number, self.epoch) == (other.number, other.epoch): + return 0 + + if self.epoch < other.epoch: + return -1 + if self.epoch > other.epoch: + return 1 + + # If none of these are true, follow the algorithm + upstream_version, debian_version = self._get_parts(self.number) + other_upstream_version, other_debian_version = self._get_parts(other.number) + + upstream_cmp = self._compare_revision_strings(upstream_version, other_upstream_version) + if upstream_cmp != 0: + return upstream_cmp + + debian_cmp = self._compare_revision_strings(debian_version, other_debian_version) + if debian_cmp != 0: + return debian_cmp + + return 0 + + def __lt__(self, other) -> bool: + """Less than magic method impl.""" + return self._compare_version(other) < 0 + + def __eq__(self, other) -> bool: + """Equality magic method impl.""" + return self._compare_version(other) == 0 + + def __gt__(self, other) -> bool: + """Greater than magic method impl.""" + return self._compare_version(other) > 0 + + def __le__(self, other) -> bool: + """Less than or equal to magic method impl.""" + return self.__eq__(other) or self.__lt__(other) + + def __ge__(self, other) -> bool: + """Greater than or equal to magic method impl.""" + return self.__gt__(other) or self.__eq__(other) + + def __ne__(self, other) -> bool: + """Not equal to magic method impl.""" + return not self.__eq__(other) + + +def add_package( + package_names: Union[str, List[str]], + version: Optional[str] = "", + arch: Optional[str] = "", + update_cache: Optional[bool] = False, +) -> Union[DebianPackage, List[DebianPackage]]: + """Add a package or list of packages to the system. + + Args: + package_names: single package name, or list of package names + name: the name(s) of the package(s) + version: an (Optional) version as a string. 
Defaults to the latest known + arch: an optional architecture for the package + update_cache: whether or not to run `apt-get update` prior to operating + + Raises: + TypeError if no package name is given, or explicit version is set for multiple packages + PackageNotFoundError if the package is not in the cache. + PackageError if packages fail to install + """ + cache_refreshed = False + if update_cache: + update() + cache_refreshed = True + + packages = {"success": [], "retry": [], "failed": []} + + package_names = [package_names] if type(package_names) is str else package_names + if not package_names: + raise TypeError("Expected at least one package name to add, received zero!") + + if len(package_names) != 1 and version: + raise TypeError( + "Explicit version should not be set if more than one package is being added!" + ) + + for p in package_names: + pkg, success = _add(p, version, arch) + if success: + packages["success"].append(pkg) + else: + logger.warning("failed to locate and install/update '%s'", pkg) + packages["retry"].append(p) + + if packages["retry"] and not cache_refreshed: + logger.info("updating the apt-cache and retrying installation of failed packages.") + update() + + for p in packages["retry"]: + pkg, success = _add(p, version, arch) + if success: + packages["success"].append(pkg) + else: + packages["failed"].append(p) + + if packages["failed"]: + raise PackageError("Failed to install packages: {}".format(", ".join(packages["failed"]))) + + return packages["success"] if len(packages["success"]) > 1 else packages["success"][0] + + +def _add( + name: str, + version: Optional[str] = "", + arch: Optional[str] = "", +) -> Tuple[Union[DebianPackage, str], bool]: + """Add a package to the system. + + Args: + name: the name(s) of the package(s) + version: an (Optional) version as a string. 
Defaults to the latest known + arch: an optional architecture for the package + + Returns: a tuple of `DebianPackage` if found, or a :str: if it is not, and + a boolean indicating success + """ + try: + pkg = DebianPackage.from_system(name, version, arch) + pkg.ensure(state=PackageState.Present) + return pkg, True + except PackageNotFoundError: + return name, False + + +def remove_package( + package_names: Union[str, List[str]] +) -> Union[DebianPackage, List[DebianPackage]]: + """Remove package(s) from the system. + + Args: + package_names: the name of a package + + Raises: + PackageNotFoundError if the package is not found. + """ + packages = [] + + package_names = [package_names] if type(package_names) is str else package_names + if not package_names: + raise TypeError("Expected at least one package name to add, received zero!") + + for p in package_names: + try: + pkg = DebianPackage.from_installed_package(p) + pkg.ensure(state=PackageState.Absent) + packages.append(pkg) + except PackageNotFoundError: + logger.info("package '%s' was requested for removal, but it was not installed.", p) + + # the list of packages will be empty when no package is removed + logger.debug("packages: '%s'", packages) + return packages[0] if len(packages) == 1 else packages + + +def update() -> None: + """Update the apt cache via `apt-get update`.""" + check_call(["apt-get", "update"], stderr=PIPE, stdout=PIPE) + + +def import_key(key: str) -> str: + """Import an ASCII Armor key. + + A Radix64 format keyid is also supported for backwards + compatibility. In this case Ubuntu keyserver will be + queried for a key via HTTPS by its keyid. This method + is less preferable because https proxy servers may + require traffic decryption which is equivalent to a + man-in-the-middle attack (a proxy server impersonates + keyserver TLS certificates and has to be explicitly + trusted by the system). + + Args: + key: A GPG key in ASCII armor format, including BEGIN + and END markers or a keyid. 
+ + Returns: + The GPG key filename written. + + Raises: + GPGKeyError if the key could not be imported + """ + key = key.strip() + if "-" in key or "\n" in key: + # Send everything not obviously a keyid to GPG to import, as + # we trust its validation better than our own. eg. handling + # comments before the key. + logger.debug("PGP key found (looks like ASCII Armor format)") + if ( + "-----BEGIN PGP PUBLIC KEY BLOCK-----" in key + and "-----END PGP PUBLIC KEY BLOCK-----" in key + ): + logger.debug("Writing provided PGP key in the binary format") + key_bytes = key.encode("utf-8") + key_name = DebianRepository._get_keyid_by_gpg_key(key_bytes) + key_gpg = DebianRepository._dearmor_gpg_key(key_bytes) + gpg_key_filename = "/etc/apt/trusted.gpg.d/{}.gpg".format(key_name) + DebianRepository._write_apt_gpg_keyfile( + key_name=gpg_key_filename, key_material=key_gpg + ) + return gpg_key_filename + else: + raise GPGKeyError("ASCII armor markers missing from GPG key") + else: + logger.warning( + "PGP key found (looks like Radix64 format). " + "SECURELY importing PGP key from keyserver; " + "full key not provided." + ) + # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL + # to retrieve GPG keys. `apt-key adv` command is deprecated as is + # apt-key in general as noted in its manpage. See lp:1433761 for more + # history. 
Instead, /etc/apt/trusted.gpg.d is used directly to drop + # gpg + key_asc = DebianRepository._get_key_by_keyid(key) + # write the key in GPG format so that apt-key list shows it + key_gpg = DebianRepository._dearmor_gpg_key(key_asc.encode("utf-8")) + gpg_key_filename = "/etc/apt/trusted.gpg.d/{}.gpg".format(key) + DebianRepository._write_apt_gpg_keyfile(key_name=gpg_key_filename, key_material=key_gpg) + return gpg_key_filename + + +class InvalidSourceError(Error): + """Exceptions for invalid source entries.""" + + +class GPGKeyError(Error): + """Exceptions for GPG keys.""" + + +class DebianRepository: + """An abstraction to represent a repository.""" + + def __init__( + self, + enabled: bool, + repotype: str, + uri: str, + release: str, + groups: List[str], + filename: Optional[str] = "", + gpg_key_filename: Optional[str] = "", + options: Optional[dict] = None, + ): + self._enabled = enabled + self._repotype = repotype + self._uri = uri + self._release = release + self._groups = groups + self._filename = filename + self._gpg_key_filename = gpg_key_filename + self._options = options + + @property + def enabled(self): + """Return whether or not the repository is enabled.""" + return self._enabled + + @property + def repotype(self): + """Return whether it is binary or source.""" + return self._repotype + + @property + def uri(self): + """Return the URI.""" + return self._uri + + @property + def release(self): + """Return which Debian/Ubuntu releases it is valid for.""" + return self._release + + @property + def groups(self): + """Return the enabled package groups.""" + return self._groups + + @property + def filename(self): + """Returns the filename for a repository.""" + return self._filename + + @filename.setter + def filename(self, fname: str) -> None: + """Set the filename used when a repo is written back to disk. + + Args: + fname: a filename to write the repository information to. 
+ """ + if not fname.endswith(".list"): + raise InvalidSourceError("apt source filenames should end in .list!") + + self._filename = fname + + @property + def gpg_key(self): + """Returns the path to the GPG key for this repository.""" + return self._gpg_key_filename + + @property + def options(self): + """Returns any additional repo options which are set.""" + return self._options + + def make_options_string(self) -> str: + """Generate the complete options string for a a repository. + + Combining `gpg_key`, if set, and the rest of the options to find + a complex repo string. + """ + options = self._options if self._options else {} + if self._gpg_key_filename: + options["signed-by"] = self._gpg_key_filename + + return ( + "[{}] ".format(" ".join(["{}={}".format(k, v) for k, v in options.items()])) + if options + else "" + ) + + @staticmethod + def prefix_from_uri(uri: str) -> str: + """Get a repo list prefix from the uri, depending on whether a path is set.""" + uridetails = urlparse(uri) + path = ( + uridetails.path.lstrip("/").replace("/", "-") if uridetails.path else uridetails.netloc + ) + return "/etc/apt/sources.list.d/{}".format(path) + + @staticmethod + def from_repo_line(repo_line: str, write_file: Optional[bool] = True) -> "DebianRepository": + """Instantiate a new `DebianRepository` a `sources.list` entry line. + + Args: + repo_line: a string representing a repository entry + write_file: boolean to enable writing the new repo to disk + """ + repo = RepositoryMapping._parse(repo_line, "UserInput") + fname = "{}-{}.list".format( + DebianRepository.prefix_from_uri(repo.uri), repo.release.replace("/", "-") + ) + repo.filename = fname + + options = repo.options if repo.options else {} + if repo.gpg_key: + options["signed-by"] = repo.gpg_key + + # For Python 3.5 it's required to use sorted in the options dict in order to not have + # different results in the order of the options between executions. 
+ options_str = ( + "[{}] ".format(" ".join(["{}={}".format(k, v) for k, v in sorted(options.items())])) + if options + else "" + ) + + if write_file: + with open(fname, "wb") as f: + f.write( + ( + "{}".format("#" if not repo.enabled else "") + + "{} {}{} ".format(repo.repotype, options_str, repo.uri) + + "{} {}\n".format(repo.release, " ".join(repo.groups)) + ).encode("utf-8") + ) + + return repo + + def disable(self) -> None: + """Remove this repository from consideration. + + Disable it instead of removing from the repository file. + """ + searcher = "{} {}{} {}".format( + self.repotype, self.make_options_string(), self.uri, self.release + ) + for line in fileinput.input(self._filename, inplace=True): + if re.match(r"^{}\s".format(re.escape(searcher)), line): + print("# {}".format(line), end="") + else: + print(line, end="") + + def import_key(self, key: str) -> None: + """Import an ASCII Armor key. + + A Radix64 format keyid is also supported for backwards + compatibility. In this case Ubuntu keyserver will be + queried for a key via HTTPS by its keyid. This method + is less preferable because https proxy servers may + require traffic decryption which is equivalent to a + man-in-the-middle attack (a proxy server impersonates + keyserver TLS certificates and has to be explicitly + trusted by the system). + + Args: + key: A GPG key in ASCII armor format, + including BEGIN and END markers or a keyid. + + Raises: + GPGKeyError if the key could not be imported + """ + self._gpg_key_filename = import_key(key) + + @staticmethod + def _get_keyid_by_gpg_key(key_material: bytes) -> str: + """Get a GPG key fingerprint by GPG key material. + + Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options. 
+ """ + # Use the same gpg command for both Xenial and Bionic + cmd = ["gpg", "--with-colons", "--with-fingerprint"] + ps = subprocess.run( + cmd, + stdout=PIPE, + stderr=PIPE, + input=key_material, + ) + out, err = ps.stdout.decode(), ps.stderr.decode() + if "gpg: no valid OpenPGP data found." in err: + raise GPGKeyError("Invalid GPG key material provided") + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + @staticmethod + def _get_key_by_keyid(keyid: str) -> str: + """Get a key via HTTPS from the Ubuntu keyserver. + + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. 
+ 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + Args: + keyid: An 8, 16 or 40 hex digit keyid to find a key for + + Returns: + A string contining key material for the specified GPG key id + + + Raises: + subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ( + "https://keyserver.ubuntu.com" "/pks/lookup?op=get&options=mr&exact=on&search=0x{}" + ) + curl_cmd = ["curl", keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return check_output(curl_cmd).decode() + + @staticmethod + def _dearmor_gpg_key(key_asc: bytes) -> bytes: + """Convert a GPG key in the ASCII armor format to the binary format. + + Args: + key_asc: A GPG key in ASCII armor format. + + Returns: + A GPG key in binary format as a string + + Raises: + GPGKeyError + """ + ps = subprocess.run(["gpg", "--dearmor"], stdout=PIPE, stderr=PIPE, input=key_asc) + out, err = ps.stdout, ps.stderr.decode() + if "gpg: no valid OpenPGP data found." in err: + raise GPGKeyError( + "Invalid GPG key material. Check your network setup" + " (MTU, routing, DNS) and/or proxy server settings" + " as well as destination keyserver status." + ) + else: + return out + + @staticmethod + def _write_apt_gpg_keyfile(key_name: str, key_material: bytes) -> None: + """Write GPG key material into a file at a provided path. + + Args: + key_name: A key name to use for a key file (could be a fingerprint) + key_material: A GPG key material (binary) + """ + with open(key_name, "wb") as keyf: + keyf.write(key_material) + + +class RepositoryMapping(Mapping): + """An representation of known repositories. 
+ + Instantiation of `RepositoryMapping` will iterate through the + filesystem, parse out repository files in `/etc/apt/...`, and create + `DebianRepository` objects in this list. + + Typical usage: + + repositories = apt.RepositoryMapping() + repositories.add(DebianRepository( + enabled=True, repotype="deb", uri="https://example.com", release="focal", + groups=["universe"] + )) + """ + + def __init__(self): + self._repository_map = {} + # Repositories that we're adding -- used to implement mode param + self.default_file = "/etc/apt/sources.list" + + # read sources.list if it exists + if os.path.isfile(self.default_file): + self.load(self.default_file) + + # read sources.list.d + for file in glob.iglob("/etc/apt/sources.list.d/*.list"): + self.load(file) + + def __contains__(self, key: str) -> bool: + """Magic method for checking presence of repo in mapping.""" + return key in self._repository_map + + def __len__(self) -> int: + """Return number of repositories in map.""" + return len(self._repository_map) + + def __iter__(self) -> Iterable[DebianRepository]: + """Return iterator for RepositoryMapping.""" + return iter(self._repository_map.values()) + + def __getitem__(self, repository_uri: str) -> DebianRepository: + """Return a given `DebianRepository`.""" + return self._repository_map[repository_uri] + + def __setitem__(self, repository_uri: str, repository: DebianRepository) -> None: + """Add a `DebianRepository` to the cache.""" + self._repository_map[repository_uri] = repository + + def load(self, filename: str): + """Load a repository source file into the cache. 
+ + Args: + filename: the path to the repository file + """ + parsed = [] + skipped = [] + with open(filename, "r") as f: + for n, line in enumerate(f): + try: + repo = self._parse(line, filename) + except InvalidSourceError: + skipped.append(n) + else: + repo_identifier = "{}-{}-{}".format(repo.repotype, repo.uri, repo.release) + self._repository_map[repo_identifier] = repo + parsed.append(n) + logger.debug("parsed repo: '%s'", repo_identifier) + + if skipped: + skip_list = ", ".join(str(s) for s in skipped) + logger.debug("skipped the following lines in file '%s': %s", filename, skip_list) + + if parsed: + logger.info("parsed %d apt package repositories", len(parsed)) + else: + raise InvalidSourceError("all repository lines in '{}' were invalid!".format(filename)) + + @staticmethod + def _parse(line: str, filename: str) -> DebianRepository: + """Parse a line in a sources.list file. + + Args: + line: a single line from `load` to parse + filename: the filename being read + + Raises: + InvalidSourceError if the source type is unknown + """ + enabled = True + repotype = uri = release = gpg_key = "" + options = {} + groups = [] + + line = line.strip() + if line.startswith("#"): + enabled = False + line = line[1:] + + # Check for "#" in the line and treat a part after it as a comment then strip it off. + i = line.find("#") + if i > 0: + line = line[:i] + + # Split a source into substrings to initialize a new repo. + source = line.strip() + if source: + # Match any repo options, and get a dict representation. 
+ for v in re.findall(OPTIONS_MATCHER, source): + opts = dict(o.split("=") for o in v.strip("[]").split()) + # Extract the 'signed-by' option for the gpg_key + gpg_key = opts.pop("signed-by", "") + options = opts + + # Remove any options from the source string and split the string into chunks + source = re.sub(OPTIONS_MATCHER, "", source) + chunks = source.split() + + # Check we've got a valid list of chunks + if len(chunks) < 3 or chunks[0] not in VALID_SOURCE_TYPES: + raise InvalidSourceError("An invalid sources line was found in %s!", filename) + + repotype = chunks[0] + uri = chunks[1] + release = chunks[2] + groups = chunks[3:] + + return DebianRepository( + enabled, repotype, uri, release, groups, filename, gpg_key, options + ) + else: + raise InvalidSourceError("An invalid sources line was found in %s!", filename) + + def add(self, repo: DebianRepository, default_filename: Optional[bool] = False) -> None: + """Add a new repository to the system. + + Args: + repo: a `DebianRepository` object + default_filename: an (Optional) filename if the default is not desirable + """ + new_filename = "{}-{}.list".format( + DebianRepository.prefix_from_uri(repo.uri), repo.release.replace("/", "-") + ) + + fname = repo.filename or new_filename + + options = repo.options if repo.options else {} + if repo.gpg_key: + options["signed-by"] = repo.gpg_key + + with open(fname, "wb") as f: + f.write( + ( + "{}".format("#" if not repo.enabled else "") + + "{} {}{} ".format(repo.repotype, repo.make_options_string(), repo.uri) + + "{} {}\n".format(repo.release, " ".join(repo.groups)) + ).encode("utf-8") + ) + + self._repository_map["{}-{}-{}".format(repo.repotype, repo.uri, repo.release)] = repo + + def disable(self, repo: DebianRepository) -> None: + """Remove a repository. Disable by default. 
+ + Args: + repo: a `DebianRepository` to disable + """ + searcher = "{} {}{} {}".format( + repo.repotype, repo.make_options_string(), repo.uri, repo.release + ) + + for line in fileinput.input(repo.filename, inplace=True): + if re.match(r"^{}\s".format(re.escape(searcher)), line): + print("# {}".format(line), end="") + else: + print(line, end="") + + self._repository_map["{}-{}-{}".format(repo.repotype, repo.uri, repo.release)] = repo diff --git a/lib/charms/operator_libs_linux/v1/systemd.py b/lib/charms/operator_libs_linux/v1/systemd.py new file mode 100644 index 0000000..cdcbad6 --- /dev/null +++ b/lib/charms/operator_libs_linux/v1/systemd.py @@ -0,0 +1,288 @@ +# Copyright 2021 Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Abstractions for stopping, starting and managing system services via systemd. + +This library assumes that your charm is running on a platform that uses systemd. E.g., +Centos 7 or later, Ubuntu Xenial (16.04) or later. + +For the most part, we transparently provide an interface to a commonly used selection of +systemd commands, with a few shortcuts baked in. For example, service_pause and +service_resume with run the mask/unmask and enable/disable invocations. 
+ +Example usage: + +```python +from charms.operator_libs_linux.v0.systemd import service_running, service_reload + +# Start a service +if not service_running("mysql"): + success = service_start("mysql") + +# Attempt to reload a service, restarting if necessary +success = service_reload("nginx", restart_on_failure=True) +``` +""" + +__all__ = [ # Don't export `_systemctl`. (It's not the intended way of using this lib.) + "SystemdError", + "daemon_reload", + "service_disable", + "service_enable", + "service_failed", + "service_pause", + "service_reload", + "service_restart", + "service_resume", + "service_running", + "service_start", + "service_stop", +] + +import logging +import subprocess + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "045b0d179f6b4514a8bb9b48aee9ebaf" + +# Increment this major API version when introducing breaking changes +LIBAPI = 1 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 4 + + +class SystemdError(Exception): + """Custom exception for SystemD related errors.""" + + +def _systemctl(*args: str, check: bool = False) -> int: + """Control a system service using systemctl. + + Args: + *args: Arguments to pass to systemctl. + check: Check the output of the systemctl command. Default: False. + + Returns: + Returncode of systemctl command execution. + + Raises: + SystemdError: Raised if calling systemctl returns a non-zero returncode and check is True. + """ + cmd = ["systemctl", *args] + logger.debug(f"Executing command: {cmd}") + try: + proc = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + bufsize=1, + encoding="utf-8", + check=check, + ) + logger.debug( + f"Command {cmd} exit code: {proc.returncode}. 
systemctl output:\n{proc.stdout}" + ) + return proc.returncode + except subprocess.CalledProcessError as e: + raise SystemdError( + f"Command {cmd} failed with returncode {e.returncode}. systemctl output:\n{e.stdout}" + ) + + +def service_running(service_name: str) -> bool: + """Report whether a system service is running. + + Args: + service_name: The name of the service to check. + + Return: + True if service is running/active; False if not. + """ + # If returncode is 0, this means that is service is active. + return _systemctl("--quiet", "is-active", service_name) == 0 + + +def service_failed(service_name: str) -> bool: + """Report whether a system service has failed. + + Args: + service_name: The name of the service to check. + + Returns: + True if service is marked as failed; False if not. + """ + # If returncode is 0, this means that the service has failed. + return _systemctl("--quiet", "is-failed", service_name) == 0 + + +def service_start(*args: str) -> bool: + """Start a system service. + + Args: + *args: Arguments to pass to `systemctl start` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl start ...` returns a non-zero returncode. + """ + return _systemctl("start", *args, check=True) == 0 + + +def service_stop(*args: str) -> bool: + """Stop a system service. + + Args: + *args: Arguments to pass to `systemctl stop` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl stop ...` returns a non-zero returncode. + """ + return _systemctl("stop", *args, check=True) == 0 + + +def service_restart(*args: str) -> bool: + """Restart a system service. + + Args: + *args: Arguments to pass to `systemctl restart` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. 
+ + Raises: + SystemdError: Raised if `systemctl restart ...` returns a non-zero returncode. + """ + return _systemctl("restart", *args, check=True) == 0 + + +def service_enable(*args: str) -> bool: + """Enable a system service. + + Args: + *args: Arguments to pass to `systemctl enable` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl enable ...` returns a non-zero returncode. + """ + return _systemctl("enable", *args, check=True) == 0 + + +def service_disable(*args: str) -> bool: + """Disable a system service. + + Args: + *args: Arguments to pass to `systemctl disable` (normally the service name). + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl disable ...` returns a non-zero returncode. + """ + return _systemctl("disable", *args, check=True) == 0 + + +def service_reload(service_name: str, restart_on_failure: bool = False) -> bool: + """Reload a system service, optionally falling back to restart if reload fails. + + Args: + service_name: The name of the service to reload. + restart_on_failure: + Boolean indicating whether to fall back to a restart if the reload fails. + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl reload|restart ...` returns a non-zero returncode. + """ + try: + return _systemctl("reload", service_name, check=True) == 0 + except SystemdError: + if restart_on_failure: + return service_restart(service_name) + else: + raise + + +def service_pause(service_name: str) -> bool: + """Pause a system service. + + Stops the service and prevents the service from starting again at boot. + + Args: + service_name: The name of the service to pause. + + Returns: + On success, this function returns True for historical reasons. 
+ + Raises: + SystemdError: Raised if service is still running after being paused by systemctl. + """ + _systemctl("disable", "--now", service_name) + _systemctl("mask", service_name) + + if service_running(service_name): + raise SystemdError(f"Attempted to pause {service_name!r}, but it is still running.") + + return True + + +def service_resume(service_name: str) -> bool: + """Resume a system service. + + Re-enable starting the service again at boot. Start the service. + + Args: + service_name: The name of the service to resume. + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if service is not running after being resumed by systemctl. + """ + _systemctl("unmask", service_name) + _systemctl("enable", "--now", service_name) + + if not service_running(service_name): + raise SystemdError(f"Attempted to resume {service_name!r}, but it is not running.") + + return True + + +def daemon_reload() -> bool: + """Reload systemd manager configuration. + + Returns: + On success, this function returns True for historical reasons. + + Raises: + SystemdError: Raised if `systemctl daemon-reload` returns a non-zero returncode. + """ + return _systemctl("daemon-reload", check=True) == 0 diff --git a/metadata.yaml b/metadata.yaml index 4f2b2c3..4d67d17 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -1,50 +1,35 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. -# This file populates the Overview on Charmhub. -# See https://juju.is/docs/sdk/metadata-reference for a checklist and guidance. - -# The charm package name, no spaces (required) -# See https://juju.is/docs/sdk/naming#heading--naming-charms for guidance. -name: is-charms-template - -# The following metadata are human-readable and will be published prominently on Charmhub. - -# (Recommended) -display-name: Charm Template - -# (Required) -summary: A very short one-line summary of the charm. 
-docs: https://discourse.charmhub.io/t/jenkins-agent-documentation-overview/12694 -issues: https://github.com/canonical/is-charms-template-repo/issues -maintainers: +name: jenkins-agent +display-name: Jenkins agent +maintainers: - https://launchpad.net/~canonical-is-devops -source: https://github.com/canonical/is-charms-template-repo - +summary: Jenkins agent machine charm +issues: https://github.com/canonical/jenkins-agent-operator/issues +source: https://github.com/canonical/jenkins-agent-operator +docs: https://discourse.charmhub.io/t/jenkins-agent-documentation-overview/12694 description: | - A single sentence that says what the charm is, concisely and memorably. - - A paragraph of one to three short sentences, that describe what the charm does. - - A third paragraph that explains what need the charm meets. - - Finally, a paragraph that describes whom the charm is useful for. - -# The containers and resources metadata apply to Kubernetes charms only. -# Remove them if not required. - -# Your workload’s containers. -containers: - httpbin: - resource: httpbin-image - -# This field populates the Resources tab on Charmhub. -resources: - # An OCI image resource for each container listed above. - # You may remove this if your charm will run without a workload sidecar container. - httpbin-image: - type: oci-image - description: OCI image for httpbin - # The upstream-source field is ignored by Juju. It is included here as a reference - # so the integration testing suite knows which image to deploy during testing. This field - # is also used by the 'canonical/charming-actions' Github action for automated releasing. - upstream-source: kennethreitz/httpbin + A [Juju](https://juju.is/) [charm](https://juju.is/docs/olm/charmed-operators) + deploying and managing [Jenkins](https://www.jenkins.io/) Agent on machines and + configurable to use a Jenkins charm deployed in another Juju model. 
+ + This charm simplifies initial deployment and "day N" operations of Jenkins Agent + on VMs and bare metal. + + As such, the charm makes it easy for those looking to take control of their own + Agents whilst keeping operations simple, and gives them the freedom to deploy on + the platform of their choice. + + For DevOps or SRE teams this charm will make operating Jenkins Agent simple and + straightforward through Juju's clean interface. It will allow easy deployment + into multiple environments for testing changes, and supports scaling out for + enterprise deployments. +tags: + - application_development + - ops +series: + - focal + - jammy +provides: + agent: + interface: jenkins_agent_v0 diff --git a/pyproject.toml b/pyproject.toml index 0ed8d68..eba55c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,6 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + [tool.bandit] exclude_dirs = ["/venv/"] [tool.bandit.assert_used] @@ -7,13 +10,28 @@ skips = ["*/*test.py", "*/test_*.py", "*tests/*.py"] [tool.coverage.run] branch = true +[tool.coverage.report] +# fail_under = 99 +show_missing = true + +[tool.pytest.ini_options] +minversion = "6.0" +log_cli_level = "INFO" + +[tool.pylint] +disable = "fixme" + +[tool.pylint.'MESSAGES CONTROL'] +extension-pkg-whitelist = "pydantic" + # Formatting tools configuration [tool.black] line-length = 99 target-version = ["py38"] -[tool.coverage.report] -show_missing = true +[tool.isort] +line_length = 99 +profile = "black" # Linting tools configuration [tool.flake8] @@ -25,49 +43,22 @@ select = ["E", "W", "F", "C", "N", "R", "D", "H"] # Ignore W503, E501 because using black creates errors with this # Ignore D107 Missing docstring in __init__ ignore = ["W503", "E501", "D107"] -# D100, D101, D102, D103: Ignore missing docstrings in tests -per-file-ignores = ["tests/*:D100,D101,D102,D103,D104,D205,D212,D415"] +# In tests we can ignore +# D100, D101, D102, D103: missing docstrings +# D205, D212: 
docstring formatting (1 blank line required, multiline docstring summary) +per-file-ignores = ["tests/*:D100,D101,D102,D103,D104,D205,D212"] docstring-convention = "google" - -[tool.isort] -line_length = 99 -profile = "black" +# Check for properly formatted copyright header in each file +copyright-check = "True" +copyright-author = "Canonical Ltd." +copyright-regexp = "Copyright\\s\\d{4}([-,]\\d{4})*\\s+%(author)s" [tool.mypy] ignore_missing_imports = true -explicit_package_bases = true -namespace_packages = true - -[tool.pylint] -disable = "wrong-import-order" - -[tool.pytest.ini_options] -minversion = "6.0" -log_cli_level = "INFO" - -# Linting tools configuration -[tool.ruff] -line-length = 99 -select = ["E", "W", "F", "C", "N", "D", "I001"] -extend-ignore = [ - "D203", - "D204", - "D213", - "D215", - "D400", - "D404", - "D406", - "D407", - "D408", - "D409", - "D413", -] -ignore = ["E501", "D107"] -extend-exclude = ["__pycache__", "*.egg_info"] -per-file-ignores = {"tests/*" = ["D100","D101","D102","D103","D104"]} - -[tool.ruff.mccabe] -max-complexity = 10 +check_untyped_defs = true +disallow_untyped_defs = true +plugins = ["pydantic.mypy"] -[tool.codespell] -skip = "build,lib,venv,icon.svg,.tox,.git,.mypy_cache,.ruff_cache,.coverage" +[[tool.mypy.overrides]] +module = "tests.*" +disallow_untyped_defs = false diff --git a/requirements.txt b/requirements.txt index aaa16b1..dd6a745 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,5 @@ -ops >= 2.2.0 +ops==2.9.0 +requests==2.31.0 +pydantic==1.10.13 +jinja2==3.1.2 +python-dotenv==1.0.0 diff --git a/src-docs/agent_observer.py.md b/src-docs/agent_observer.py.md new file mode 100644 index 0000000..0d6e2de --- /dev/null +++ b/src-docs/agent_observer.py.md @@ -0,0 +1,45 @@ + + + + +# module `agent_observer.py` +The agent relation observer module. + +**Global Variables** +--------------- +- **AGENT_RELATION** + + +--- + +## class `Observer` +The Jenkins agent relation observer. 
+ + + +### function `__init__` + +```python +__init__( + charm: CharmBase, + state: State, + jenkins_agent_service: JenkinsAgentService +) +``` + +Initialize the observer and register event handlers. + + + +**Args:** + + - `charm`: The parent charm to attach the observer to. + - `state`: The charm state. + - `jenkins_agent_service`: Service manager that controls Jenkins agent service. + + +--- + +#### property model + +Shortcut for more simple access the model. diff --git a/src-docs/charm.py.md b/src-docs/charm.py.md new file mode 100644 index 0000000..4a22aab --- /dev/null +++ b/src-docs/charm.py.md @@ -0,0 +1,89 @@ + + + + +# module `charm.py` +Charm jenkins agent. + +**Global Variables** +--------------- +- **AGENT_RELATION** + + +--- + +## class `JenkinsAgentCharm` +Charm Jenkins agent. + + + +### function `__init__` + +```python +__init__(*args: Any) +``` + +Initialize the charm and register event handlers. + + + +**Args:** + + - `args`: Arguments to initialize the charm base. + + +--- + +#### property app + +Application that this unit is part of. + +--- + +#### property charm_dir + +Root directory of the charm as it is running. + +--- + +#### property config + +A mapping containing the charm's config and current values. + +--- + +#### property meta + +Metadata of this charm. + +--- + +#### property model + +Shortcut for more simple access the model. + +--- + +#### property unit + +Unit that this execution is responsible for. + + + +--- + + + +### function `restart_agent_service` + +```python +restart_agent_service() → None +``` + +Restart the jenkins agent charm. + + + +**Raises:** + + - `RuntimeError`: when the service fails to properly start. diff --git a/src-docs/charm_state.py.md b/src-docs/charm_state.py.md new file mode 100644 index 0000000..d77a4da --- /dev/null +++ b/src-docs/charm_state.py.md @@ -0,0 +1,122 @@ + + + + +# module `charm_state.py` +The module for managing charm state. 
+ +**Global Variables** +--------------- +- **AGENT_RELATION** + + +--- + +## class `AgentMeta` +The Jenkins agent metadata. + +Attrs: executors: The number of executors available on the unit. labels: The comma separated labels to assign to the agent. name: The name of the agent. + + + + +--- + + + +### function `as_dict` + +```python +as_dict() → Dict[str, str] +``` + +Return dictionary representation of agent metadata. + + + +**Returns:** + A dictionary adhering to jenkins_agent_v0 interface. + + +--- + +## class `Credentials` +The credentials used to register to the Jenkins server. + +Attrs: address: The Jenkins server address to register to. secret: The secret used to register agent. + + + + + +--- + +## class `InvalidStateError` +Exception raised when state configuration is invalid. + + + +### function `__init__` + +```python +__init__(msg: str = '') +``` + +Initialize a new instance of the InvalidStateError exception. + + + +**Args:** + + - `msg`: Explanation of the error. + + + + + +--- + +## class `State` +The Jenkins agent state. + +Attrs: agent_meta: The Jenkins agent metadata to register on Jenkins server. agent_relation_credentials: The full set of credentials from the agent relation. None if partial data is set or the credentials do not belong to current agent. unit_data: Data about the current unit. jenkins_agent_service_name: The Jenkins agent workload container name. + + + + +--- + + + +### classmethod `from_charm` + +```python +from_charm(charm: CharmBase) → State +``` + +Initialize the state from charm. + + + +**Args:** + + - `charm`: The root Jenkins agent charm. + + + +**Raises:** + + - `InvalidStateError`: if invalid state values were encountered. + + + +**Returns:** + Current state of Jenkins agent. + + +--- + +## class `UnitData` +The charm's unit data. + +Attrs: series: The base of the machine on which the charm is running. 
diff --git a/src-docs/service.py.md b/src-docs/service.py.md new file mode 100644 index 0000000..32f4869 --- /dev/null +++ b/src-docs/service.py.md @@ -0,0 +1,137 @@ + + + + +# module `service.py` +The agent pebble service module. + +**Global Variables** +--------------- +- **AGENT_SERVICE_NAME** +- **AGENT_PACKAGE_NAME** +- **SYSTEMD_SERVICE_CONF_DIR** +- **PPA_URI** +- **PPA_DEB_SRC** +- **PPA_GPG_KEY_ID** +- **STARTUP_CHECK_TIMEOUT** +- **STARTUP_CHECK_INTERVAL** + + +--- + +## class `FileRenderError` +Exception raised when failing to interact with a file in the filesystem. + + + + + +--- + +## class `JenkinsAgentService` +Jenkins agent service class. + +Attrs: is_active: Indicate if the agent service is active and running. + + + +### function `__init__` + +```python +__init__(state: State) +``` + +Initialize the jenkins agent service. + + + +**Args:** + + - `state`: The Jenkins agent state. + + +--- + +#### property is_active + +Indicate if the jenkins agent service is active. + + + +--- + + + +### function `install` + +```python +install() → None +``` + +Install and set up the jenkins agent apt package. + + + +**Raises:** + + - `PackageInstallError`: if the package installation failed. + +--- + + + +### function `restart` + +```python +restart() → None +``` + +Start the agent service. + + + +**Raises:** + + - `ServiceRestartError`: when restarting the service fails + +--- + + + +### function `stop` + +```python +stop() → None +``` + +Stop the agent service. + + + +**Raises:** + + - `ServiceStopError`: if systemctl stop returns a non-zero exit code. + + +--- + +## class `PackageInstallError` +Exception raised when package installation fails. + + + + + +--- + +## class `ServiceRestartError` +Exception raised when failing to start the agent service. + + + + + +--- + +## class `ServiceStopError` +Exception raised when failing to stop the agent service. 
diff --git a/src/agent_observer.py b/src/agent_observer.py new file mode 100644 index 0000000..20bd1a1 --- /dev/null +++ b/src/agent_observer.py @@ -0,0 +1,96 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""The agent relation observer module.""" + + +import logging + +import ops + +import service +from charm_state import AGENT_RELATION, State + +logger = logging.getLogger() + + +class Observer(ops.Object): + """The Jenkins agent relation observer.""" + + def __init__( + self, + charm: ops.CharmBase, + state: State, + jenkins_agent_service: service.JenkinsAgentService, + ): + """Initialize the observer and register event handlers. + + Args: + charm: The parent charm to attach the observer to. + state: The charm state. + jenkins_agent_service: Service manager that controls Jenkins agent service. + """ + super().__init__(charm, "agent-observer") + self.charm = charm + self.state = state + self.jenkins_agent_service = jenkins_agent_service + charm.framework.observe( + charm.on[AGENT_RELATION].relation_joined, self._on_agent_relation_joined + ) + charm.framework.observe( + charm.on[AGENT_RELATION].relation_changed, self._on_agent_relation_changed + ) + charm.framework.observe( + charm.on[AGENT_RELATION].relation_departed, self._on_agent_relation_departed + ) + + def _on_agent_relation_joined(self, event: ops.RelationJoinedEvent) -> None: + """Handle agent relation joined event. + + Args: + event: The event fired when an agent has joined the relation. + """ + self.charm.unit.status = ops.MaintenanceStatus( + f"Setting up '{event.relation.name}' relation." + ) + + relation_data = self.state.agent_meta.as_dict() + logger.debug("Setting agent relation unit data: %s", relation_data) + event.relation.data[self.charm.unit].update(relation_data) + + def _on_agent_relation_changed(self, _: ops.RelationChangedEvent) -> None: + """Handle agent relation changed event. + + Raises: + RuntimeError: when the service fails to properly start. 
+ """ + # Check if the jenkins agent service has started and set agent ready. + # This is to prevent relation data from other units to trigger a service restart. + if self.jenkins_agent_service.is_active: + logger.warning("Given agent already registered. Skipping.") + return + + # If relation data is not yet available to this unit, set its status to waiting + if not self.state.agent_relation_credentials: + self.charm.unit.status = ops.WaitingStatus("Waiting for complete relation data.") + logger.info("Waiting for complete relation data.") + return + + # Try to start the service with the obtained credentials from relation data + self.charm.unit.status = ops.MaintenanceStatus("Starting jenkins agent service") + try: + self.jenkins_agent_service.restart() + except service.ServiceRestartError as exc: + logger.error("Error restarting the agent service %s", exc) + raise RuntimeError("Error restarting the agent service.") from exc + + self.charm.unit.status = ops.ActiveStatus() + + def _on_agent_relation_departed(self, _: ops.RelationDepartedEvent) -> None: + """Handle agent relation departed event.""" + try: + self.jenkins_agent_service.stop() + except service.ServiceStopError: + self.charm.unit.status = ops.BlockedStatus("Error stopping the agent service") + return + self.charm.unit.status = ops.BlockedStatus("Waiting for config/relation.") diff --git a/src/charm.py b/src/charm.py index 46227b1..2fb657a 100755 --- a/src/charm.py +++ b/src/charm.py @@ -3,113 +3,97 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. -# Learn more at: https://juju.is/docs/sdk +"""Charm jenkins agent.""" -"""Charm the service. 
- -Refer to the following post for a quick-start guide that will help you -develop a new k8s charm using the Operator Framework: - -https://discourse.charmhub.io/t/4208 -""" import logging +import typing import ops +from ops.main import main -# Log messages can be retrieved using juju debug-log -logger = logging.getLogger(__name__) +import agent_observer +import service +from charm_state import AGENT_RELATION, InvalidStateError, State -VALID_LOG_LEVELS = ["info", "debug", "warning", "error", "critical"] +logger = logging.getLogger() -class IsCharmsTemplateCharm(ops.CharmBase): - """Charm the service.""" +class JenkinsAgentCharm(ops.CharmBase): + """Charm Jenkins agent.""" - def __init__(self, *args): - """Construct. + def __init__(self, *args: typing.Any): + """Initialize the charm and register event handlers. Args: - args: Arguments passed to the CharmBase parent constructor. + args: Arguments to initialize the charm base. """ super().__init__(*args) - self.framework.observe(self.on.httpbin_pebble_ready, self._on_httpbin_pebble_ready) + try: + self.state = State.from_charm(self) + except InvalidStateError as e: + logger.debug("Error parsing charm_state %s", e) + self.unit.status = ops.BlockedStatus(e.msg) + return + + self.jenkins_agent_service = service.JenkinsAgentService(self.state) + self.agent_observer = agent_observer.Observer(self, self.state, self.jenkins_agent_service) + + self.framework.observe(self.on.install, self._on_install) + self.framework.observe(self.on.start, self._on_start) self.framework.observe(self.on.config_changed, self._on_config_changed) + self.framework.observe(self.on.upgrade_charm, self._on_upgrade_charm) - def _on_httpbin_pebble_ready(self, event: ops.PebbleReadyEvent): - """Define and start a workload using the Pebble API. - - Change this example to suit your needs. You'll need to specify the right entrypoint and - environment configuration for your specific workload. 
+ def _on_install(self, _: ops.InstallEvent) -> None: + """Handle install event, setup the agent service. - Learn more about interacting with Pebble at at https://juju.is/docs/sdk/pebble. - - Args: - event: event triggering the handler. + Raises: + RuntimeError: when the installation of the agent service fails """ - # Get a reference the container attribute on the PebbleReadyEvent - container = event.workload - # Add initial Pebble config layer using the Pebble API - container.add_layer("httpbin", self._pebble_layer, combine=True) - # Make Pebble reevaluate its plan, ensuring any services are started if enabled. - container.replan() - # Learn more about statuses in the SDK docs: - # https://juju.is/docs/sdk/constructs#heading--statuses - self.unit.status = ops.ActiveStatus() + try: + self.jenkins_agent_service.install() + except service.PackageInstallError as exc: + logger.error("Error installing the agent service %s", exc) + raise RuntimeError("Error installing the agent service") from exc + + def _on_config_changed(self, _: ops.ConfigChangedEvent) -> None: + """Handle config changed event. Update the agent's label in the relation's databag.""" + if agent_relation := self.model.get_relation(AGENT_RELATION): + relation_data = self.state.agent_meta.as_dict() + agent_relation.data[self.unit].update(relation_data) + + def _on_upgrade_charm(self, _: ops.UpgradeCharmEvent) -> None: + """Handle upgrade charm event.""" + self.restart_agent_service() + + def _on_start(self, _: ops.EventBase) -> None: + """Handle on start event.""" + self.restart_agent_service() + + def restart_agent_service(self) -> None: + """Restart the jenkins agent charm. + + Raises: + RuntimeError: when the service fails to properly start. + """ + if not self.model.get_relation(AGENT_RELATION): + self.model.unit.status = ops.BlockedStatus("Waiting for relation.") + return - def _on_config_changed(self, event: ops.ConfigChangedEvent): - """Handle changed configuration. 
+ if not self.state.agent_relation_credentials: + self.model.unit.status = ops.WaitingStatus("Waiting for complete relation data.") + logger.info("Waiting for complete relation data.") + return - Change this example to suit your needs. If you don't need to handle config, you can remove - this method. + self.model.unit.status = ops.MaintenanceStatus("Starting agent service.") + try: + self.jenkins_agent_service.restart() + except service.ServiceRestartError as exc: + logger.error("Error restarting the agent service %s", exc) + raise RuntimeError("Error restarting the agent service") from exc - Learn more about config at https://juju.is/docs/sdk/config + self.model.unit.status = ops.ActiveStatus() - Args: - event: event triggering the handler. - """ - # Fetch the new config value - log_level = self.model.config["log-level"].lower() - - # Do some validation of the configuration option - if log_level in VALID_LOG_LEVELS: - # The config is good, so update the configuration of the workload - container = self.unit.get_container("httpbin") - # Verify that we can connect to the Pebble API in the workload container - if container.can_connect(): - # Push an updated layer with the new config - container.add_layer("httpbin", self._pebble_layer, combine=True) - container.replan() - - logger.debug("Log level for gunicorn changed to '%s'", log_level) - self.unit.status = ops.ActiveStatus() - else: - # We were unable to connect to the Pebble API, so we defer this event - event.defer() - self.unit.status = ops.WaitingStatus("waiting for Pebble API") - else: - # In this case, the config option is bad, so block the charm and notify the operator. 
- self.unit.status = ops.BlockedStatus("invalid log level: '{log_level}'") - - @property - def _pebble_layer(self): - """Return a dictionary representing a Pebble layer.""" - return { - "summary": "httpbin layer", - "description": "pebble config layer for httpbin", - "services": { - "httpbin": { - "override": "replace", - "summary": "httpbin", - "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", - "startup": "enabled", - "environment": { - "GUNICORN_CMD_ARGS": f"--log-level {self.model.config['log-level']}" - }, - } - }, - } - - -if __name__ == "__main__": # pragma: nocover - ops.main.main(IsCharmsTemplateCharm) + +if __name__ == "__main__": # pragma: no cover + main(JenkinsAgentCharm) diff --git a/src/charm_state.py b/src/charm_state.py new file mode 100644 index 0000000..17110a9 --- /dev/null +++ b/src/charm_state.py @@ -0,0 +1,180 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""The module for managing charm state.""" + + +import logging +import os +import typing +from dataclasses import dataclass + +import ops +from dotenv import dotenv_values +from pydantic import BaseModel, Field, ValidationError, tools +from typing_extensions import Literal + +# agent relation name +AGENT_RELATION = "agent" + +logger = logging.getLogger() + + +class Credentials(BaseModel): + """The credentials used to register to the Jenkins server. + + Attrs: + address: The Jenkins server address to register to. + secret: The secret used to register agent. + """ + + address: str + secret: str + + +class AgentMeta(BaseModel): + """The Jenkins agent metadata. + + Attrs: + executors: The number of executors available on the unit. + labels: The comma separated labels to assign to the agent. + name: The name of the agent. + """ + + labels: str + name: str + executors: int = Field(..., ge=1) + + def as_dict(self) -> typing.Dict[str, str]: + """Return dictionary representation of agent metadata. 
+ + Returns: + A dictionary adhering to jenkins_agent_v0 interface. + """ + return { + "executors": str(self.executors), + "labels": self.labels, + "name": self.name, + } + + +class UnitData(BaseModel): + """The charm's unit data. + + Attrs: + series: The base of the machine on which the charm is running. + """ + + series: Literal["focal", "jammy"] + + +class InvalidStateError(Exception): + """Exception raised when state configuration is invalid.""" + + def __init__(self, msg: str = ""): + """Initialize a new instance of the InvalidStateError exception. + + Args: + msg: Explanation of the error. + """ + self.msg = msg + + +def _get_jenkins_unit( + all_units: typing.Set[ops.Unit], current_app_name: str +) -> typing.Optional[ops.Unit]: + """Get the Jenkins charm unit in a relation. + + Args: + all_units: All units in a relation. + current_app_name: The Jenkins-agent application name. + + Returns: + The Jenkins server application unit in the relation if found. None otherwise. + """ + jenkins_unit = [unit for unit in all_units if unit.app.name != current_app_name] + return jenkins_unit[0] if jenkins_unit else None + + +def _get_credentials_from_agent_relation( + server_unit_databag: ops.RelationDataContent, unit_name: str +) -> typing.Optional[Credentials]: + """Import server metadata from databag in agent relation. + + Args: + server_unit_databag: The relation databag content from agent relation. + unit_name: The agent unit name. + + Returns: + Metadata if complete values(url, secret) are set. None otherwise. + """ + address = server_unit_databag.get("url") + secret = server_unit_databag.get(f"{unit_name}_secret") + if not address or not secret: + return None + return Credentials(address=address, secret=secret) + + +@dataclass +class State: + """The Jenkins agent state. + + Attrs: + agent_meta: The Jenkins agent metadata to register on Jenkins server. + agent_relation_credentials: The full set of credentials from the agent relation. 
None if + partial data is set or the credentials do not belong to current agent. + unit_data: Data about the current unit. + jenkins_agent_service_name: The Jenkins agent workload container name. + """ + + agent_meta: AgentMeta + agent_relation_credentials: typing.Optional[Credentials] + unit_data: UnitData + jenkins_agent_service_name: str = "jenkins-agent" + + @classmethod + def from_charm(cls, charm: ops.CharmBase) -> "State": + """Initialize the state from charm. + + Args: + charm: The root Jenkins agent charm. + + Raises: + InvalidStateError: if invalid state values were encountered. + + Returns: + Current state of Jenkins agent. + """ + try: + agent_meta = AgentMeta( + executors=tools.parse_obj_as(int, os.cpu_count()), + labels=charm.model.config.get("jenkins_agent_labels", "") or os.uname().machine, + name=charm.unit.name.replace("/", "-"), + ) + except ValidationError as exc: + logging.error("Invalid executor state, %s", exc) + raise InvalidStateError("Invalid executor state.") from exc + + agent_relation = charm.model.get_relation(AGENT_RELATION) + agent_relation_credentials: typing.Optional[Credentials] = None + if agent_relation and ( + agent_relation_jenkins_unit := _get_jenkins_unit(agent_relation.units, charm.app.name) + ): + agent_relation_credentials = _get_credentials_from_agent_relation( + agent_relation.data[agent_relation_jenkins_unit], agent_meta.name + ) + + # Load series information + os_release: dict = dotenv_values("/etc/os-release") + unit_series = os_release.get("UBUNTU_CODENAME") + try: + unit_data = UnitData(series=unit_series) + except ValidationError as exc: + logging.error("Unsupported series, %s: %s", unit_series, exc) + raise InvalidStateError("Unsupported series.") from exc + + return cls( + agent_meta=agent_meta, + agent_relation_credentials=agent_relation_credentials, + unit_data=unit_data, + ) diff --git a/src/service.py b/src/service.py new file mode 100644 index 0000000..25ed795 --- /dev/null +++ b/src/service.py @@ -0,0 
+1,190 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""The agent pebble service module.""" + +import logging +import os +import pwd +import time +from pathlib import Path + +from charms.operator_libs_linux.v0 import apt +from charms.operator_libs_linux.v1 import systemd +from jinja2 import Template + +from charm_state import State + +logger = logging.getLogger(__name__) +AGENT_SERVICE_NAME = "jenkins-agent" +AGENT_PACKAGE_NAME = "jenkins-agent" +SYSTEMD_SERVICE_CONF_DIR = "/etc/systemd/system/jenkins-agent.service.d/" +PPA_URI = "https://ppa.launchpadcontent.net/canonical-is-devops/jenkins-agent-charm/ubuntu/" +PPA_DEB_SRC = "deb-https://ppa.launchpadcontent.net/canonical-is-devops/jenkins-agent-charm/ubuntu/-" # noqa: E501 pylint: disable=line-too-long +PPA_GPG_KEY_ID = "ad4196d35c25cdac" +STARTUP_CHECK_TIMEOUT = 30 +STARTUP_CHECK_INTERVAL = 2 +JENKINS_HOME = Path("/var/lib/jenkins") +AGENT_READY_PATH = Path(JENKINS_HOME / ".ready") + + +class PackageInstallError(Exception): + """Exception raised when package installation fails.""" + + +class ServiceRestartError(Exception): + """Exception raised when failing to start the agent service.""" + + +class ServiceStopError(Exception): + """Exception raised when failing to stop the agent service.""" + + +class FileRenderError(Exception): + """Exception raised when failing to interact with a file in the filesystem.""" + + +class JenkinsAgentService: + """Jenkins agent service class. + + Attrs: + is_active: Indicate if the agent service is active and running. + """ + + def __init__(self, state: State): + """Initialize the jenkins agent service. + + Args: + state: The Jenkins agent state. + """ + self.state = state + + def _render_file(self, path: Path, content: str, mode: int) -> None: + """Write a content rendered from a template to a file. + + Args: + path: Path object to the file. + content: the data to be written to the file. 
+ mode: access permission mask applied to the + file using chmod (e.g. 0o640). + + Raises: + FileRenderError: if interaction with the filesystem fails + """ + try: + path.write_text(content) + os.chmod(path, mode) + # Get the uid/gid for the root user (running the service). + # TODO: the user running the jenkins agent is currently root + # we should replace this by defining a dedicated user in the apt package + u = pwd.getpwnam("root") + # Set the correct ownership for the file. + os.chown(path, uid=u.pw_uid, gid=u.pw_gid) + except (OSError, KeyError, TypeError) as exc: + raise FileRenderError(f"Error rendering file:\n{exc}") from exc + + @property + def is_active(self) -> bool: + """Indicate if the jenkins agent service is active.""" + try: + return systemd.service_running(AGENT_SERVICE_NAME) + except SystemError as exc: + logger.error("Failed to call systemctl:\n%s", exc) + return False + + def install(self) -> None: + """Install and set up the jenkins agent apt package. + + Raises: + PackageInstallError: if the package installation failed. + """ + try: + # Add ppa that hosts the jenkins-agent package + series = self.state.unit_data.series + repositories = apt.RepositoryMapping() + if f"{PPA_DEB_SRC}-{series}" not in repositories: + repositories.add( + apt.DebianRepository( + enabled=True, + repotype="deb", + uri=PPA_URI, + release=series, + groups=["main"], + ) + ) + apt.import_key(PPA_GPG_KEY_ID) + # Install the necessary packages + apt.update() + apt.add_package("openjdk-17-jre") + apt.add_package(AGENT_PACKAGE_NAME) + except (apt.PackageError, apt.PackageNotFoundError, apt.GPGKeyError) as exc: + raise PackageInstallError("Error installing the agent package") from exc + + def restart(self) -> None: + """Start the agent service. 
+ + Raises: + ServiceRestartError: when restarting the service fails + """ + # Render template and write to appropriate file if only credentials are set + credentials = self.state.agent_relation_credentials + if not credentials: + raise ServiceRestartError("Error starting the agent service: missing configuration") + + with open("templates/jenkins_agent_env.conf.j2", "r", encoding="utf-8") as file: + template = Template(file.read()) + # fetch credentials and set them as environments + environments = { + "JENKINS_TOKEN": credentials.secret, + "JENKINS_URL": credentials.address, + "JENKINS_AGENT": self.state.agent_meta.name, + } + # render template file + rendered = template.render(environments=environments) + # Ensure that service conf directory exist + config_dir = Path(SYSTEMD_SERVICE_CONF_DIR) + config_dir.mkdir(parents=True, exist_ok=True) + # Write the conf file + logger.info("Rendering agent configuration") + logger.debug("%s", environments) + config_file = Path(f"{SYSTEMD_SERVICE_CONF_DIR}/override.conf") + try: + self._render_file(config_file, rendered, 0o644) + systemd.daemon_reload() + systemd.service_restart(AGENT_SERVICE_NAME) + except systemd.SystemdError as exc: + raise ServiceRestartError(f"Error starting the agent service:\n{exc}") from exc + except FileRenderError as exc: + raise ServiceRestartError( + "Error interacting with the filesystem when rendering configuration file" + ) from exc + + # Check if the service is running after startup + if not self._startup_check(): + raise ServiceRestartError("Error waiting for the agent service to start") + + def stop(self) -> None: + """Stop the agent service. + + Raises: + ServiceStopError: if systemctl stop returns a non-zero exit code. 
+ """ + try: + systemd.service_stop(AGENT_SERVICE_NAME) + except systemd.SystemdError as exc: + logger.error("service %s failed to stop", AGENT_SERVICE_NAME) + raise ServiceStopError(f"service {AGENT_SERVICE_NAME} failed to stop") from exc + + def _startup_check(self) -> bool: + """Check whether the service was correctly started. + + Returns: + bool: indicate whether the service was started. + """ + timeout = time.time() + STARTUP_CHECK_TIMEOUT + while time.time() < timeout: + time.sleep(STARTUP_CHECK_INTERVAL) + service_up = os.path.exists(str(AGENT_READY_PATH)) and self.is_active + if service_up: + break + return os.path.exists(str(AGENT_READY_PATH)) and self.is_active diff --git a/templates/jenkins_agent_env.conf.j2 b/templates/jenkins_agent_env.conf.j2 new file mode 100644 index 0000000..10be87d --- /dev/null +++ b/templates/jenkins_agent_env.conf.j2 @@ -0,0 +1,4 @@ +[Service] +{%- for key, value in environments.items() %} +Environment="{{key}}={{value}}" +{%- endfor -%} diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..fb1b031 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,4 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Tests for the jenkins-agent operator.""" diff --git a/tests/conftest.py b/tests/conftest.py index ad7716b..2ef7e01 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,10 +1,12 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. -"""Fixtures for charm tests.""" +"""Fixtures for jenkins-agent charm tests.""" +from pytest import Parser -def pytest_addoption(parser): + +def pytest_addoption(parser: Parser): """Parse additional pytest options. Args: diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py index e3979c0..887614c 100644 --- a/tests/integration/__init__.py +++ b/tests/integration/__init__.py @@ -1,2 +1,4 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. 
+ +"""Integration tests module.""" diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py new file mode 100644 index 0000000..28e27de --- /dev/null +++ b/tests/integration/conftest.py @@ -0,0 +1,210 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +"""Fixtures for Jenkins-agent-k8s-operator charm integration tests.""" + +import logging +import secrets +import textwrap +import typing + +import jenkinsapi.jenkins +import ops +import pytest +import pytest_asyncio +from juju.action import Action +from juju.application import Application +from juju.client._definitions import FullStatus, UnitStatus +from juju.model import Controller, Model +from juju.unit import Unit +from pytest_operator.plugin import OpsTest + +logger = logging.getLogger(__name__) + +NUM_AGENT_UNITS = 1 + + +@pytest_asyncio.fixture(scope="function", name="charm") +async def charm_fixture(request: pytest.FixtureRequest, ops_test: OpsTest) -> str: + """The path to charm.""" + charm = request.config.getoption("--charm-file") + if not charm: + charm = await ops_test.build_charm(".") + else: + charm = f"./{charm}" + + return charm + + +@pytest.fixture(scope="function", name="model") +def model_fixture(ops_test: OpsTest) -> Model: + """The testing model.""" + assert ops_test.model + return ops_test.model + + +@pytest_asyncio.fixture( + scope="function", name="jenkins_agent_application", params=["focal", "jammy"] +) +async def application_fixture( + model: Model, charm: str, request: typing.Any +) -> typing.AsyncGenerator[Application, None]: + """Build and deploy the charm.""" + # Deploy the charm and wait for blocked status + application = await model.deploy( + charm, + num_units=NUM_AGENT_UNITS, + series=request.param, + config={"jenkins_agent_labels": "machine"}, + ) + await model.wait_for_idle(apps=[application.name], status=ops.BlockedStatus.name) + + yield application + + await model.remove_application(application.name, block_until_done=True, 
force=True) + + +@pytest_asyncio.fixture(scope="function", name="k8s_controller") +async def jenkins_server_k8s_controller_fixture() -> typing.AsyncGenerator[Controller, None]: + """The juju controller on microk8s. + The controller is bootstrapped in "pre_run_script.sh". + """ + controller = Controller() + await controller.connect("controller") + cloud = await controller.get_cloud() + logger.info("Creating jenkins server controller on cloud %s", cloud) + + yield controller + + await controller.disconnect() + + +@pytest_asyncio.fixture(scope="function", name="jenkins_server_model") +async def jenkins_server_model_fixture( + k8s_controller: Controller, +) -> typing.AsyncGenerator[Model, None]: + """The model for jenkins-k8s charm.""" + model_name = f"jenkins-k8s-{secrets.token_hex(2)}" + cloud = await k8s_controller.get_cloud() + logger.info("Adding model %s on %s", model_name, cloud) + model = await k8s_controller.add_model(model_name) + + yield model + + await k8s_controller.destroy_models( + model.name, destroy_storage=True, force=True, max_wait=10 * 60 + ) + await model.disconnect() + + +@pytest_asyncio.fixture(scope="function", name="jenkins_server") +async def jenkins_server_fixture(jenkins_server_model: Model) -> Application: + """The jenkins machine server.""" + jenkins = await jenkins_server_model.deploy("jenkins-k8s") + await jenkins_server_model.wait_for_idle( + apps=[jenkins.name], + timeout=20 * 60, + wait_for_active=True, + idle_period=30, + raise_on_error=False, + ) + + return jenkins + + +@pytest_asyncio.fixture(scope="function", name="server_unit_ip") +async def server_unit_ip_fixture(jenkins_server_model: Model, jenkins_server: Application): + """Get Jenkins machine server charm unit IP.""" + status: FullStatus = await jenkins_server_model.get_status([jenkins_server.name]) + try: + unit_status: UnitStatus = next( + iter(status.applications[jenkins_server.name].units.values()) + ) + assert unit_status.address, "Invalid unit address" + return 
unit_status.address + except StopIteration as exc: + raise StopIteration("Invalid unit status") from exc + + +@pytest_asyncio.fixture(scope="function", name="web_address") +async def web_address_fixture(server_unit_ip: str): + """Get Jenkins machine server charm web address.""" + return f"http://{server_unit_ip}:8080" + + +@pytest_asyncio.fixture(scope="function", name="jenkins_client") +async def jenkins_client_fixture( + jenkins_server: Application, + web_address: str, +) -> jenkinsapi.jenkins.Jenkins: + """The Jenkins API client.""" + jenkins_unit: Unit = jenkins_server.units[0] + action: Action = await jenkins_unit.run_action("get-admin-password") + await action.wait() + assert action.status == "completed", "Failed to get credentials." + password = action.results["password"] + + # Initialization of the jenkins client will raise an exception if unable to connect to the + # server. + return jenkinsapi.jenkins.Jenkins( + baseurl=web_address, username="admin", password=password, timeout=60 + ) + + +def gen_test_job_xml(node_label: str): + """Generate a job xml with target node label. + + Args: + node_label: The node label to assign to job to. + + Returns: + The job XML. + """ + return textwrap.dedent( + f""" + + + + false + + + {node_label} + false + false + false + false + + false + + + echo "hello world" + + + + + + + """ + ) + + +def assert_job_success( + client: jenkinsapi.jenkins.Jenkins, agent_name: str, test_target_label: str +): + """Assert that a job can be created and ran successfully. + + Args: + client: The Jenkins API client. + agent_name: The registered Jenkins agent node to check. + test_target_label: The Jenkins agent node label. + """ + nodes = client.get_nodes() + assert any( + (agent_name in key for key in nodes.keys()) + ), f"Jenkins {agent_name} node not registered." 
+ + job = client.create_job(agent_name, gen_test_job_xml(test_target_label)) + queue_item = job.invoke() + queue_item.block_until_complete() + build: jenkinsapi.build.Build = queue_item.get_build() + assert build.get_status() == "SUCCESS" diff --git a/tests/integration/pre_run_script.sh b/tests/integration/pre_run_script.sh new file mode 100644 index 0000000..6b9b815 --- /dev/null +++ b/tests/integration/pre_run_script.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. + +# Pre-run script for integration test operator-workflows action. +# https://github.com/canonical/operator-workflows/blob/main/.github/workflows/integration_test.yaml + +# Jenkins machine agent charm is deployed on lxd and Jenkins-k8s server charm is deployed on +# microk8s. + +sg microk8s -c "microk8s status --wait-ready" +# lxd should be installed and inited by a previous step in integration test action. +echo "bootstrapping lxd juju controller" +sg microk8s -c "juju bootstrap localhost localhost" + +echo "bootstrapping secondary microk8s controller" +sg microk8s -c "juju bootstrap microk8s controller" + +echo "Switching to testing model" +sg microk8s -c "juju switch localhost" diff --git a/tests/integration/requirements_integration_tests.txt b/tests/integration/requirements_integration_tests.txt new file mode 100644 index 0000000..8278852 --- /dev/null +++ b/tests/integration/requirements_integration_tests.txt @@ -0,0 +1,6 @@ +jenkinsapi>=0.3,<1 +juju==3.0.4 +ops +pytest +pytest-asyncio +pytest-operator \ No newline at end of file diff --git a/tests/integration/test_agent.py b/tests/integration/test_agent.py new file mode 100644 index 0000000..87892e8 --- /dev/null +++ b/tests/integration/test_agent.py @@ -0,0 +1,65 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+ +"""Integration tests for jenkins-agent-k8s-operator charm.""" + +import logging +import secrets +import string + +import jenkinsapi.jenkins +from juju.application import Application +from juju.model import Model + +from .conftest import NUM_AGENT_UNITS, assert_job_success + +logger = logging.getLogger() + +MICROK8S_CONTROLLER = "controller" + + +def rand_ascii(length: int) -> str: + """Generate random string containing only ascii characters. + + Args: + length: length of the generated string. + + Returns: + Randomly generated ascii string of length {length}. + """ + return "".join(secrets.choice(string.ascii_lowercase) for _ in range(length)) + + +async def test_agent_relation( + jenkins_server: Application, + jenkins_agent_application: Application, + jenkins_client: jenkinsapi.jenkins.Jenkins, +): + """ + arrange: given a cross controller cross model jenkins machine agent. + act: when the offer is created and relation is setup through the offer. + assert: the relation succeeds and agents become active. + """ + agent_cmi_name: str = f"cmi-agent-{rand_ascii(4)}" + jenkins_server_model: Model = jenkins_server.model + logger.info("Creating offer %s:%s", jenkins_server.name, agent_cmi_name) + await jenkins_server_model.create_offer(f"{jenkins_server.name}:agent", agent_cmi_name) + # Machine model of the jenkins agent + model: Model = jenkins_agent_application.model + logger.info( + "cmr: controller:admin/%s.%s", + jenkins_server_model.name, + jenkins_server.name, + ) + await model.relate( + f"{jenkins_agent_application.name}:agent", + f"{MICROK8S_CONTROLLER}:admin/{jenkins_server_model.name}.{agent_cmi_name}", + ) + await model.wait_for_idle(status="active", timeout=1200) + + nodes = jenkins_client.get_nodes() + assert all(node.is_online() for node in nodes.values()) + # One of the nodes is the server node. 
+ assert len(nodes.values()) == NUM_AGENT_UNITS + 1 + + assert_job_success(jenkins_client, jenkins_agent_application.name, "machine") diff --git a/tests/integration/test_charm.py b/tests/integration/test_charm.py deleted file mode 100644 index f212ec1..0000000 --- a/tests/integration/test_charm.py +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -"""Integration tests.""" - -import asyncio -import logging -from pathlib import Path - -import pytest -import yaml -from pytest_operator.plugin import OpsTest - -logger = logging.getLogger(__name__) - -METADATA = yaml.safe_load(Path("./metadata.yaml").read_text(encoding="utf-8")) -APP_NAME = METADATA["name"] - - -@pytest.mark.abort_on_fail -async def test_build_and_deploy(ops_test: OpsTest, pytestconfig: pytest.Config): - """Deploy the charm together with related charms. - - Assert on the unit status before any relations/configurations take place. - """ - # Deploy the charm and wait for active/idle status - charm = pytestconfig.getoption("--charm-file") - resources = {"httpbin-image": METADATA["resources"]["httpbin-image"]["upstream-source"]} - assert ops_test.model - await asyncio.gather( - ops_test.model.deploy( - f"./{charm}", resources=resources, application_name=APP_NAME, series="jammy" - ), - ops_test.model.wait_for_idle( - apps=[APP_NAME], status="active", raise_on_blocked=True, timeout=1000 - ), - ) diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index e3979c0..3dbb54c 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,2 +1,4 @@ # Copyright 2024 Canonical Ltd. # See LICENSE file for licensing details. + +"""Unit tests for the jenkins-agent operator.""" diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py new file mode 100644 index 0000000..177e27b --- /dev/null +++ b/tests/unit/conftest.py @@ -0,0 +1,19 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. 
+"""Fixtures for jenkins-agent charm tests.""" + + +import pytest +from ops.testing import Harness + +from charm import JenkinsAgentCharm + + +@pytest.fixture(scope="function", name="harness") +def harness_fixture(): + """Enable ops test framework harness.""" + harness = Harness(JenkinsAgentCharm) + + yield harness + + harness.cleanup() diff --git a/tests/unit/test_base.py b/tests/unit/test_base.py deleted file mode 100644 index c1ce697..0000000 --- a/tests/unit/test_base.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2024 Canonical Ltd. -# See LICENSE file for licensing details. - -# Learn more about testing at: https://juju.is/docs/sdk/testing - -# pylint: disable=duplicate-code,missing-function-docstring -"""Unit tests.""" - -import unittest - -import ops -import ops.testing - -from charm import IsCharmsTemplateCharm - - -class TestCharm(unittest.TestCase): - """Test class.""" - - def setUp(self): - """Set up the testing environment.""" - self.harness = ops.testing.Harness(IsCharmsTemplateCharm) - self.addCleanup(self.harness.cleanup) - self.harness.begin() - - def test_httpbin_pebble_ready(self): - # Expected plan after Pebble ready with default config - expected_plan = { - "services": { - "httpbin": { - "override": "replace", - "summary": "httpbin", - "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent", - "startup": "enabled", - "environment": {"GUNICORN_CMD_ARGS": "--log-level info"}, - } - }, - } - # Simulate the container coming up and emission of pebble-ready event - self.harness.container_pebble_ready("httpbin") - # Get the plan now we've run PebbleReady - updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() - # Check we've got the plan we expected - self.assertEqual(expected_plan, updated_plan) - # Check the service was started - service = self.harness.model.unit.get_container("httpbin").get_service("httpbin") - self.assertTrue(service.is_running()) - # Ensure we set an ActiveStatus with no message - 
self.assertEqual(self.harness.model.unit.status, ops.ActiveStatus()) - - def test_config_changed_valid_can_connect(self): - # Ensure the simulated Pebble API is reachable - self.harness.set_can_connect("httpbin", True) - # Trigger a config-changed event with an updated value - self.harness.update_config({"log-level": "debug"}) - # Get the plan now we've run PebbleReady - updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict() - updated_env = updated_plan["services"]["httpbin"]["environment"] - # Check the config change was effective - self.assertEqual(updated_env, {"GUNICORN_CMD_ARGS": "--log-level debug"}) - self.assertEqual(self.harness.model.unit.status, ops.ActiveStatus()) - - def test_config_changed_valid_cannot_connect(self): - # Trigger a config-changed event with an updated value - self.harness.update_config({"log-level": "debug"}) - # Check the charm is in WaitingStatus - self.assertIsInstance(self.harness.model.unit.status, ops.WaitingStatus) - - def test_config_changed_invalid(self): - # Ensure the simulated Pebble API is reachable - self.harness.set_can_connect("httpbin", True) - # Trigger a config-changed event with an updated value - self.harness.update_config({"log-level": "foobar"}) - # Check the charm is in BlockedStatus - self.assertIsInstance(self.harness.model.unit.status, ops.BlockedStatus) diff --git a/tests/unit/test_charm.py b/tests/unit/test_charm.py new file mode 100644 index 0000000..81a8e6b --- /dev/null +++ b/tests/unit/test_charm.py @@ -0,0 +1,84 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +# pylint: disable=protected-access +"""Test for charm hooks.""" + +from unittest.mock import MagicMock + +import ops +import ops.testing +import pytest + +import charm_state +import service +from charm import JenkinsAgentCharm + + +def raise_exception(exception: Exception): + """Raise exception function for monkeypatching. 
+ + Args: + exception: The exception to raise. + + Raises: + exception: . + """ + raise exception + + +def test___init___invalid_state(harness: ops.testing.Harness, monkeypatch: pytest.MonkeyPatch): + """ + arrange: patched State.from_charm that raises an InvalidState Error. + act: when the JenkinsAgentCharm is initialized. + assert: The agent falls into BlockedStatus. + """ + monkeypatch.setattr( + charm_state.State, + "from_charm", + MagicMock(side_effect=[charm_state.InvalidStateError("Invalid executor message")]), + ) + + harness.begin() + + jenkins_charm: JenkinsAgentCharm = harness.charm + assert jenkins_charm.unit.status.name == ops.BlockedStatus.name + assert jenkins_charm.unit.status.message == "Invalid executor message" + + +def test__on_upgrade_charm(harness: ops.testing.Harness, monkeypatch: pytest.MonkeyPatch): + """ + arrange: given a charm with patched agent service that is active. + act: when _on_upgrade_charm is called. + assert: The agent falls into waiting status with the correct message. + """ + monkeypatch.setattr(service.JenkinsAgentService, "is_active", MagicMock(return_value=True)) + monkeypatch.setattr(service.JenkinsAgentService, "restart", MagicMock()) + harness.begin() + + jenkins_charm: JenkinsAgentCharm = harness.charm + upgrade_charm_event = MagicMock(spec=ops.UpgradeCharmEvent) + jenkins_charm._on_upgrade_charm(upgrade_charm_event) + + assert jenkins_charm.unit.status.message == "Waiting for relation." + assert jenkins_charm.unit.status.name == ops.BlockedStatus.name + + +def test__on_config_changed(harness: ops.testing.Harness, monkeypatch: pytest.MonkeyPatch): + """ + arrange: given a charm with patched relation. + act: when _on_config_changed is called. + assert: The charm correctly updates the relation databag. 
+ """ + harness.begin() + config_changed_event = MagicMock(spec=ops.ConfigChangedEvent) + get_relation_mock = MagicMock() + monkeypatch.setattr(ops.Model, "get_relation", get_relation_mock) + + jenkins_charm: JenkinsAgentCharm = harness.charm + jenkins_charm._on_config_changed(config_changed_event) + + agent_relation = get_relation_mock.return_value + assert agent_relation.data[harness._unit_name].update.call_count == 1 diff --git a/tests/unit/test_service.py b/tests/unit/test_service.py new file mode 100644 index 0000000..3d366ca --- /dev/null +++ b/tests/unit/test_service.py @@ -0,0 +1,40 @@ +# Copyright 2024 Canonical Ltd. +# See LICENSE file for licensing details. +"""Test for service interaction.""" +# pylint: disable=protected-access +from unittest.mock import MagicMock + +import ops.testing +import pytest +from charms.operator_libs_linux.v0 import apt + +from charm import JenkinsAgentCharm + + +@pytest.mark.parametrize( + "f,error_thrown", + [ + ("import_key", apt.GPGKeyError), + ("add_package", apt.PackageError), + ("add_package", apt.PackageNotFoundError), + ], +) +def test_install_apt_package_gpg_key_error( + harness: ops.testing.Harness, monkeypatch: pytest.MonkeyPatch, f, error_thrown +): + """ + arrange: Harness with mocked apt module. + act: run _on_install hook with methods raising different errors. + assert: The charm should be in an error state. 
+ """ + harness.begin() + charm: JenkinsAgentCharm = harness.charm + monkeypatch.setattr(apt, "RepositoryMapping", MagicMock()) + monkeypatch.setattr(apt, "import_key", MagicMock()) + monkeypatch.setattr(apt, "update", MagicMock()) + monkeypatch.setattr(apt, "add_package", MagicMock()) + + monkeypatch.setattr(apt, f, MagicMock(side_effect=[error_thrown])) + + with pytest.raises(RuntimeError, match="Error installing the agent service"): + charm._on_install(MagicMock(spec=ops.InstallEvent)) diff --git a/tox.ini b/tox.ini index 202340f..2157d87 100644 --- a/tox.ini +++ b/tox.ini @@ -54,6 +54,7 @@ deps = requests types-PyYAML types-requests + jenkinsapi>=0.3,<1 -r{toxinidir}/requirements.txt commands = pydocstyle {[vars]src_path} @@ -77,7 +78,7 @@ deps = -r{toxinidir}/requirements.txt commands = coverage run --source={[vars]src_path} \ - -m pytest --ignore={[vars]tst_path}integration -v --tb native -s {posargs} + -m pytest --ignore={[vars]tst_path}integration -v --tb native --log-cli-level=DEBUG -s {posargs} coverage report [testenv:coverage-report] @@ -100,11 +101,10 @@ commands = [testenv:integration] description = Run integration tests deps = - # Last compatible version with Juju 2.9 - juju==3.0.4 - pytest - pytest-asyncio - pytest-operator + # Pin protobuf version to fix + # https://stackoverflow.com/questions/72441758/typeerror-descriptors-cannot-not-be-created-directly + protobuf==3.20.0 + -r{toxinidir}/tests/integration/requirements_integration_tests.txt -r{toxinidir}/requirements.txt commands = pytest -v --tb native --ignore={[vars]tst_path}unit --log-cli-level=INFO -s {posargs}