diff --git a/.cirrus.yml b/.cirrus.yml index d68f905220..cec2be66ac 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -35,7 +35,7 @@ env: UBUNTU_PRIOR_IMAGE_NAME: "ubuntu-2204-jammy-v20240501" UBUNTU_PRIOR2_IMAGE_NAME: "ubuntu-2004-focal-v20240426" UBUNTU_PRIOR3_IMAGE_NAME: "ubuntu-1804-bionic-v20240116a" - UBUNTU_SNAP_IMAGE_NAME: "ubuntu-2204-jammy-v20240501" + UBUNTU_SNAP_IMAGE_NAME: "ubuntu-2404-noble-amd64-v20240423" UBUNTU_DEVEL_FAMILY_NAME: "ubuntu-2410-amd64" # Curl-command prefix for downloading task artifacts, simply add the @@ -56,6 +56,7 @@ gcp_credentials: ENCRYPTED[!77d4c8251094346c41db63cb05eba2ff98eaff04e58c5d0e2a8e # Run a simple lint on the community cluster flake8_task: + skip: &man-changes-include "changesIncludeOnly('man/*')" alias: "flake8_test" name: "Flake8 linting test" container: @@ -66,6 +67,7 @@ flake8_task: flake_script: tox -e flake8 pylint_task: + skip: *man-changes-include alias: "pylint_test" name: "pylint linting test" container: @@ -77,6 +79,7 @@ pylint_task: # breaks/changes in common modules. This is not meant to check any of the actual # collections or archive integrity. py_break_task: + skip: *man-changes-include alias: "py_break" name: "Breakage test python-$PY_VERSION" container: @@ -92,6 +95,7 @@ py_break_task: # Make sure a user can manually build an rpm from the checkout rpm_build_task: + skip: *man-changes-include alias: "rpm_build" name: "rpm Build From Checkout - ${BUILD_NAME}" gce_instance: &standardvm @@ -130,6 +134,7 @@ rpm_build_task: # Make sure a user can manually build a deb from the checkout deb_build_task: + skip: *man-changes-include alias: "deb_build" name: "deb Build From Checkout" gce_instance: @@ -151,6 +156,7 @@ deb_build_task: # Make sure a user can manually build a snap from the checkout snap_build_task: + skip: *man-changes-include alias: "snap_build" name: "snap Build From Checkout" gce_instance: @@ -175,6 +181,7 @@ snap_build_task: # Run the stage one (no mocking) tests across all distros on GCP report_stageone_task: + skip: *man-changes-include alias: "stageone_report" name: "Report Stage One - $BUILD_NAME" depends_on: @@ -251,6 +258,7 @@ report_stageone_task: path: "sos-fail-logs.tar" report_stageone_daily_task: + skip: *man-changes-include alias: "stageone_daily_report" name: "Report Stage One - ${UBUNTU_DEVEL_FAMILY_NAME}" allow_failures: true @@ -271,6 +279,7 @@ report_stageone_daily_task: # IFF the stage one tests all pass, then run stage two for latest distros report_stagetwo_task: + skip: *man-changes-include alias: "stagetwo_report" name: "Report Stage Two - $BUILD_NAME" depends_on: stageone_report @@ -296,6 +305,7 @@ report_stagetwo_task: log_artifacts: *logs report_stagetwo_daily_task: + skip: *man-changes-include alias: "stagetwo_daily_report" name: "Report Stage Two - ${UBUNTU_DEVEL_FAMILY_NAME}" allow_failures: true diff --git a/debian/changelog b/debian/changelog index 279e0e9c25..010c4534fb 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +doca-sosreport (4.8.1) noble; urgency=medium + + * rebase on top of 4.8.1 upstream + + -- Michael Filanov Thu, 24 Nov 2024 14:40:01 +0100 + doca-sosreport (4.8.0-1) unstable; urgency=low * replace sosreport with doca-sosreport diff --git a/debian/copyright b/debian/copyright index 921c89370d..c691b88833 100644 --- a/debian/copyright +++ b/debian/copyright @@ -3,21 +3,23 @@ Upstream-Name: sosreport Upstream-Contact: Jake Hunsaker Source: https://github.com/sosreport/sos -License: GPL-2+ - This program is free software; you can redistribute it and/or modify - 
it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or (at - your option) any later version. +Files: * +Copyright: (C) 2012-2017 Bryn M. Reeves + (C) 2007-2024 Red Hat, Inc. + (C) 2012-2024 Canonical Ltd. +License: GPL-2 + +License: GPL-2 + This program is free software; you can redistribute it and/or + modify it under the terms of the GNU General Public License + as published by the Free Software Foundation; version 2. . - This program is distributed in the hope that it will be useful, but - WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + This application is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. . You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. - . - On Debian systems, the complete text of the GNU General Public - License, version 2, can be found in /usr/share/common-licenses/GPL-2. diff --git a/docs/conf.py b/docs/conf.py index 7ce306d2d7..b2838dae6d 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -21,7 +21,6 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('..')) -import sos # -- General configuration ------------------------------------------------ @@ -52,16 +51,16 @@ # General information about the project. project = 'SoS' -copyright = '2014, Bryn Reeves' +project_copyright = '2014, Bryn Reeves' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '4.7.2' +version = '4.8.1' # The full version, including alpha/beta/rc tags. -release = '4.7.2' +release = '4.8.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
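Both docs/conf.py above and sos/__init__.py later in this patch carry the same 4.8.1 version string. If keeping the two in sync by hand becomes error-prone, conf.py could parse the version out of sos/__init__.py instead of importing the package. A minimal sketch, not part of this patch; the regex and the relative path are assumptions:

    # Hypothetical docs/conf.py fragment: derive version/release from
    # sos/__init__.py rather than duplicating the string.
    import os
    import re

    _init = os.path.join(os.path.dirname(__file__), '..', 'sos', '__init__.py')
    with open(_init, encoding='utf-8') as f:
        # sos/__init__.py defines a line of the form: __version__ = "4.8.1"
        version = release = re.search(
            r'__version__ = "([^"]+)"', f.read()).group(1)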
diff --git a/man/en/sos-clean.1 b/man/en/sos-clean.1
index 82d712da54..916ad994fb 100644
--- a/man/en/sos-clean.1
+++ b/man/en/sos-clean.1
@@ -1,6 +1,6 @@
-.TH SOS CLEAN 1 "Thu May 21 2020"
+.TH SOS_CLEAN 1 "Thu May 21 2020"
 .SH NAME
-sos clean - Obfuscate sensitive data from one or more sos reports
+sos_clean, sos_mask \- Obfuscate sensitive data from one or more sos reports
 .SH SYNOPSIS
 .B sos clean TARGET [options]
 [\-\-domains]
diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1
index 775a328809..183e11a082 100644
--- a/man/en/sos-collect.1
+++ b/man/en/sos-collect.1
@@ -1,7 +1,7 @@
-.TH SOS COLLECT 1 "April 2020"
+.TH SOS_COLLECT 1 "April 2020"
 .SH NAME
-sos collect \- Collect sos reports from multiple (cluster) nodes
+sos_collect \- Collect sos reports from multiple (cluster) nodes
 .SH SYNOPSIS
 .B sos collect
 [\-a|\-\-all\-options]
diff --git a/man/en/sos-help.1 b/man/en/sos-help.1
index ec7777acbd..53bd7d9bc4 100644
--- a/man/en/sos-help.1
+++ b/man/en/sos-help.1
@@ -1,6 +1,6 @@
-.TH SOS HELP 1 "Fri Nov 05 2021"
+.TH SOS_HELP 1 "Fri Nov 05 2021"
 .SH NAME
-sos help - get detailed help information on sos commands and components
+sos_help \- get detailed help information on sos commands and components
 .SH SYNOPSIS
 .B sos help TOPIC
diff --git a/man/en/sos-report.1 b/man/en/sos-report.1
index fc2c205e43..63d92db255 100644
--- a/man/en/sos-report.1
+++ b/man/en/sos-report.1
@@ -1,6 +1,6 @@
-.TH SOS REPORT 1 "Mon Mar 25 2013"
+.TH SOS_REPORT 1 "Mon Mar 25 2013"
 .SH NAME
-sos report \- Collect and package diagnostic and support data
+sos_report \- Collect and package diagnostic and support data
 .SH SYNOPSIS
 .B sos report
 [-l|--list-plugins]\fR
@@ -313,7 +313,7 @@ a timeout for a specific plugin, use the 'timeout' plugin option available to
 all plugins - e.g. '-k logs.timeout=600'.
 The plugin-specific timeout option will override this option. For example, using
-\'--plugin-timeout=60 -k logs.timeout=600\' will set a timeout of 600 seconds for
+\(aq--plugin-timeout=60 -k logs.timeout=600' will set a timeout of 600 seconds for
 the logs plugin and 60 seconds for all other enabled plugins.
 .TP
 .B \--cmd-timeout TIMEOUT
@@ -382,7 +382,7 @@ to prevent sos report working dir to consume all free disk space. No plugin
 data is available at the end.

 Plugins will be collected sequentially, size of collected files and commands outputs
-will be calculated and the plugin files will be immediatelly deleted prior execution
+will be calculated and the plugin files will be immediately deleted prior to execution
 of the next plugin. This still can consume whole free disk space, though. Please note,
 size estimations may not be accurate for highly utilized systems due to
diff --git a/plugins_overview.py b/plugins_overview.py
index 96484cb43a..7cefd372d8 100644
--- a/plugins_overview.py
+++ b/plugins_overview.py
@@ -27,7 +27,6 @@
 PLUGDIR = 'sos/report/plugins'

 plugs_data = {}      # the map of all plugins data to collect
-plugcontent = ''     # content of plugin file just being processed


 # method to parse an item of a_s_c/a_c_o/.. methods
@@ -42,11 +41,11 @@ def add_valid_item(dest, item):
             return


-# method to find in `plugcontent` all items of given method (a_c_s/a_c_o/..)
+# method to find all items of given method (a_c_s/a_c_o/..) in plugin content,
 # split by comma; add each valid item to the `dest` list
-def add_all_items(method, dest, wrapopen=r'\(', wrapclose=r'\)'):
+def add_all_items(method, dest, plugfd, wrapopen=r'\(', wrapclose=r'\)'):
     regexp = f"{method}{wrapopen}(.*?){wrapclose}"
-    for match in re.findall(regexp, plugcontent,
+    for match in re.findall(regexp, plugfd,
                             flags=re.MULTILINE | re.DOTALL):
         # tuple of distros ended by either (class|from|import)
         if isinstance(match, tuple):
@@ -90,30 +89,32 @@ def add_all_items(method, dest, wrapopen=r'\(', wrapclose=r'\)'):
         'journals': [],
         'env': [],
     }
-    plugcontent = open(
-        os.path.join(PLUGDIR, plugfile)).read().replace('\n', '')
-    add_all_items(
-        "from sos.report.plugins import ",
-        plugs_data[plugname]['distros'],
-        wrapopen='',
-        wrapclose='(class|from|import)'
-    )
-    add_all_items("profiles = ", plugs_data[plugname]['profiles'], wrapopen='')
-    add_all_items("packages = ", plugs_data[plugname]['packages'], wrapopen='')
-    add_all_items("add_copy_spec", plugs_data[plugname]['copyspecs'])
-    add_all_items("add_forbidden_path", plugs_data[plugname]['forbidden'])
-    add_all_items("add_cmd_output", plugs_data[plugname]['commands'])
-    add_all_items("collect_cmd_output", plugs_data[plugname]['commands'])
-    add_all_items("add_service_status", plugs_data[plugname]['service_status'])
-    add_all_items("add_journal", plugs_data[plugname]['journals'])
-    add_all_items("add_env_var", plugs_data[plugname]['env'])
+    with open(os.path.join(PLUGDIR, plugfile), encoding='utf-8') as pfile:
+        pfd = pfile.read().replace('\n', '')
+        add_all_items(
+            "from sos.report.plugins import ", plugs_data[plugname]['distros'],
+            pfd, wrapopen='', wrapclose='(class|from|import)'
+        )
+        add_all_items("profiles = ", plugs_data[plugname]['profiles'],
+                      pfd, wrapopen='')
+        add_all_items("packages = ", plugs_data[plugname]['packages'],
+                      pfd, wrapopen='')
+        add_all_items("add_copy_spec", plugs_data[plugname]['copyspecs'], pfd)
+        add_all_items("add_forbidden_path",
+                      plugs_data[plugname]['forbidden'], pfd)
+        add_all_items("add_cmd_output", plugs_data[plugname]['commands'], pfd)
+        add_all_items("collect_cmd_output",
+                      plugs_data[plugname]['commands'], pfd)
+        add_all_items("add_service_status",
+                      plugs_data[plugname]['service_status'], pfd)
+        add_all_items("add_journal", plugs_data[plugname]['journals'], pfd)
+        add_all_items("add_env_var", plugs_data[plugname]['env'], pfd)

 # print output; if "csv" is cmdline argument, print in CSV format, else JSON
 if (len(sys.argv) > 1) and (sys.argv[1] == "csv"):
     print("plugin;url;distros;profiles;packages;copyspecs;forbidden;commands;"
           "service_status;journals;env_vars")
-    for plugname in plugs_data.keys():
-        plugin = plugs_data[plugname]
+    for plugname, plugin in plugs_data.items():
         # determine max number of lines - usually
         # "max(len(copyspec),len(commands))"
         # ignore 'sourcecode' key as it
diff --git a/pylintrc b/pylintrc
index 8bdf4f8c9c..a3522df459 100644
--- a/pylintrc
+++ b/pylintrc
@@ -33,65 +33,14 @@ disable=
     R0913,  # too-many-arguments
     R0914,  # too-many-locals
     R0915,  # too-many-statements
+    R0917,  # too-many-positional-arguments
     R1702,  # too-many-nested-blocks
     W0201,  # attribute-defined-outside-init
     W0511,  # fixme
 ###################### VV things we should fix VV
-    W0613,  # unused-argument
-    W0612,  # unused-variable
-    R1705,  # no-else-return
     W0719,  # broad-exception-raised
     W1203,  # logging-fstring-interpolation
-    W1406,  # redundant-u-string-prefix
-    R1732,  # consider-using-with
-    W1514,  # unspecified-encoding
-    W0107,  # unnecessary-pass
     W0718,  # broad-exception-caught
W0102, # dangerous-default-value - C0325, # superfluous-parens - E1111, # assignment-from-no-return - R1714, # consider-using-in - C0206, # consider-using-dict-items - W0221, # arguments-differ - R1711, # useless-return - R1720, # no-else-raise - R0205, # useless-object-inheritance - W3101, # missing-timeout - C0201, # consider-iterating-dictionary - R1719, # simplifiable-if-expression - W1201, # logging-not-lazy - W0707, # raise-missing-from - W0237, # arguments-renamed - E0606, # possibly-used-before-assignment - R1724, # no-else-continue - R1729, # use-a-generator - C0117, # unnecessary-negation - W0622, # redefined-builtin W0212, # protected-access - R1728, # consider-using-generator - R1710, # inconsistent-return-statements - C1802, # use-implicit-booleaness-not-len - W0706, # try-except-raise - C1803, # use-implicit-booleaness-not-comparison - W0231, # super-init-not-called - C0123, # unidiomatic-typecheck - C0207, # use-maxsplit-arg - C2801, # unnecessary-dunder-call - E0203, # access-member-before-definition - E0611, # no-name-in-module - E0702, # raising-bad-type - E1101, # no-member - E1135, # unsupported-membership-test - R1703, # simplifiable-if-statement - R1718, # consider-using-set-comprehension - R1721, # unnecessary-comprehension - R1722, # consider-using-sys-exit - W0105, # pointless-string-statement - W0108, # unnecessary-lambda - W0222, # signature-differs - W0223, # abstract-method - W0246, # useless-parent-delegation - W0621, # redefined-outer-name - W0640, # cell-var-from-loop W1509, # subprocess-popen-preexec-fn - W4701, # modified-iterating-list diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index f45e939d4b..1c796aa38d 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -5,12 +5,12 @@ description: | primarily aimed at Linux distributions and other UNIX-like operating systems. 
grade: stable -base: core22 +base: core24 confinement: classic adopt-info: sos -license: GPL-2.0-or-later +license: GPL-2.0-only environment: - PYTHONPATH: ${SNAP}/lib/python3.10/site-packages:${SNAP}/usr/lib/python3/dist-packages:${PYTHONPATH} + PYTHONPATH: ${SNAP}/lib/python3.12/site-packages:${SNAP}/usr/lib/python3/dist-packages:${PYTHONPATH} parts: sos: @@ -24,13 +24,12 @@ parts: build-packages: - git - python3 - - snapcraft - gettext - python3-venv stage-packages: - - python3.10-minimal - - libpython3.10-minimal - - libpython3.10-stdlib + - python3.12-minimal + - libpython3.12-minimal + - libpython3.12-stdlib python-packages: - pip - setuptools diff --git a/sos.spec b/sos.spec index 8f6e920ec4..2c7c44a16d 100644 --- a/sos.spec +++ b/sos.spec @@ -1,6 +1,6 @@ Summary: A set of tools to gather troubleshooting information from a system Name: doca-sosreport -Version: 4.8.0 +Version: 4.8.1 Release: 1%{?dist} Source0: %{name}-%{version}.tar.gz License: GPL-2.0-or-later @@ -8,7 +8,6 @@ BuildArch: noarch Url: https://github.com/sosreport/sos BuildRequires: python3-devel BuildRequires: python3-setuptools -Requires: python3-rpm Requires: python3-pexpect %if 0%{?rhel} && 0%{?rhel} < 10 Requires: python3-setuptools @@ -97,6 +96,13 @@ rm -rf %{buildroot}/usr/config/ %config(noreplace) %{_sysconfdir}/sos/sos-mlx-cloud-verification.conf %changelog +* Tue Oct 15 2024 Pavel Moravec = 4.8.1 +- New upstream release + +* Sat Aug 17 2024 Jake Hunsaker = 4.8.0 +- New upstream release +- License clarification to GPLv2 only + * Fri Jun 21 2024 Pavel Moravec = 4.7.2 - New upstream release diff --git a/sos/__init__.py b/sos/__init__.py index cc3406abf8..66dc9e7b19 100644 --- a/sos/__init__.py +++ b/sos/__init__.py @@ -14,7 +14,7 @@ This module houses the i18n setup and message function. The default is to use gettext to internationalize messages. """ -__version__ = "4.8.0" +__version__ = "4.8.1" import os import sys @@ -34,12 +34,6 @@ def _default(msg): _sos = _default -# py3 < 3.6 compat -try: - ModuleNotFoundError -except NameError: - ModuleNotFoundError = ImportError - class SoS(): """Main entrypoint for sos from the command line @@ -74,10 +68,10 @@ def __init__(self, args): ['collector']) except ModuleNotFoundError as err: import sos.missing - if 'sos.collector' in err.msg: + if 'sos.collector' in str(err.msg): # is not locally installed - packaged separately self._components['collect'] = (sos.missing.MissingCollect, []) - elif 'pexpect' in err.msg: + elif 'pexpect' in str(err.msg): # cannot be imported due to missing the pexpect dep self._components['collect'] = (sos.missing.MissingPexpect, []) else: @@ -85,16 +79,16 @@ def __init__(self, args): raise # build the top-level parser _com_string = '' - for com in self._components: - aliases = self._components[com][1] + for com, value in self._components.items(): + aliases = value[1] aliases.insert(0, com) _com = ', '.join(aliases) - desc = self._components[com][0].desc + desc = value[0].desc _com_string += (f"\t{_com:<30}{desc}\n") usage_string = ("%(prog)s [options]\n\n" "Available components:\n") usage_string = usage_string + _com_string - epilog = ("See `sos --help` for more information") + epilog = "See `sos --help` for more information" self.parser = ArgumentParser(usage=usage_string, epilog=epilog) self.parser.register('action', 'extend', SosListOption) # set the component subparsers @@ -107,16 +101,16 @@ def __init__(self, args): # now build the parser for each component. 
# this needs to be done here, as otherwise --help will be unavailable # for the component subparsers - for comp in self._components: + for comp, value in self._components.items(): _com_subparser = self.subparsers.add_parser( comp, - aliases=self._components[comp][1], + aliases=value[1], prog=f"sos {comp}" ) _com_subparser.usage = f"sos {comp} [options]" _com_subparser.register('action', 'extend', SosListOption) self._add_common_options(_com_subparser) - self._components[comp][0].add_parser_options(parser=_com_subparser) + value[0].add_parser_options(parser=_com_subparser) _com_subparser.set_defaults(component=comp) self.args = self.parser.parse_args(self.cmdline) self._init_component() @@ -173,7 +167,7 @@ def _init_component(self): initialize that component. """ _com = self.args.component - if _com not in self._components.keys(): + if _com not in self._components: print(f"Unknown subcommand '{_com}' specified") try: _to_load = self._components[_com][0] diff --git a/sos/archive.py b/sos/archive.py index e03b8cd629..3374899220 100644 --- a/sos/archive.py +++ b/sos/archive.py @@ -37,7 +37,7 @@ P_DIR = "dir" -class Archive(object): +class Archive: """Abstract base class for archives.""" @classmethod @@ -116,18 +116,18 @@ def get_archive_path(self): directory based cache prior to packaging should return the path to the temporary directory where the report content is located""" - pass + raise NotImplementedError def cleanup(self): """Clean up any temporary resources used by an Archive class.""" - pass + raise NotImplementedError def finalize(self, method): """Finalize an archive object via method. This may involve creating An archive that is subsequently compressed or simply closing an archive that supports in-line handling. If method is automatic then the following methods are tried in order: xz, gzip""" - pass + raise NotImplementedError class FileCacheArchive(Archive): @@ -159,7 +159,7 @@ def __init__(self, name, tmpdir, policy, threads, enc_opts, sysroot, def dest_path(self, name): if os.path.isabs(name): name = name.lstrip(os.sep) - return (os.path.join(self._archive_root, name)) + return os.path.join(self._archive_root, name) def join_sysroot(self, path): if not self.sysroot or path.startswith(self.sysroot): @@ -210,7 +210,7 @@ def in_archive(path): # Build a list of path components in root-to-leaf order. 
path = src_dir path_comps = [] - while path != '/' and path != '': + while path not in ('/', ''): head, tail = os.path.split(path) path_comps.append(tail) path = head @@ -304,7 +304,7 @@ def check_path(self, src, path_type, dest=None, force=False): if os.path.exists(dest_dir) and not os.path.isdir(dest_dir): raise ValueError(f"path '{dest_dir}' exists and is not a " "directory") - elif not os.path.exists(dest_dir): + if not os.path.exists(dest_dir): src_dir = src if path_type == P_DIR else os.path.split(src)[0] self._make_leading_paths(src_dir) @@ -339,13 +339,13 @@ def is_special(mode): def _copy_attributes(self, src, dest): # copy file attributes, skip SELinux xattrs for /sys and /proc try: - stat = os.stat(src) + _stat = os.stat(src) if src.startswith("/sys/") or src.startswith("/proc/"): shutil.copymode(src, dest) - os.utime(dest, ns=(stat.st_atime_ns, stat.st_mtime_ns)) + os.utime(dest, ns=(_stat.st_atime_ns, _stat.st_mtime_ns)) else: shutil.copystat(src, dest) - os.chown(dest, stat.st_uid, stat.st_gid) + os.chown(dest, _stat.st_uid, _stat.st_gid) except Exception as e: self.log_debug(f"caught '{e}' setting attributes of '{dest}'") @@ -377,7 +377,7 @@ def add_file(self, src, dest=None, force=False): # Open file case: first rewind the file to obtain # everything written to it. src.seek(0) - with open(dest, "w") as f: + with open(dest, "w", encoding='utf-8') as f: for line in src: f.write(line) file_name = "open file" @@ -515,8 +515,8 @@ def name_max(self): if 'PC_NAME_MAX' in os.pathconf_names: pc_name_max = os.pathconf_names['PC_NAME_MAX'] return os.pathconf(self._archive_root, pc_name_max) - else: - return 255 + + return 255 def get_tmp_dir(self): return self._archive_root @@ -656,7 +656,7 @@ def _encrypt(self, archive): r = sos_get_command_output(enc_cmd, timeout=0, env=env) if r["status"] == 0: return arc_name - elif r["status"] == 2: + if r["status"] == 2: if self.enc_opts["key"]: msg = "Specified key not in keyring" else: @@ -667,8 +667,8 @@ def _encrypt(self, archive): msg = f"gpg exited with code {r['status']}" raise Exception(msg) - def _build_archive(self, method): - pass + def _build_archive(self, method): # pylint: disable=unused-argument + return self.name() class TarFileArchive(FileCacheArchive): @@ -719,7 +719,7 @@ def copy_permissions_filter(self, tarinfo): def get_selinux_context(self, path): try: - (rc, c) = selinux.getfilecon(path) + (_, c) = selinux.getfilecon(path) return c except Exception: return None @@ -727,11 +727,6 @@ def get_selinux_context(self, path): def name(self): return f"{self._archive_root}.{self._suffix}" - def name_max(self): - # GNU Tar format supports unlimited file name length. Just return - # the limit of the underlying FileCacheArchive. 
- return super().name_max() - def _build_archive(self, method): if method == 'auto': method = 'xz' if find_spec('lzma') is not None else 'gzip' @@ -743,22 +738,21 @@ def _build_archive(self, method): kwargs = {'compresslevel': 6} else: kwargs = {'preset': 3} - tar = tarfile.open(self._archive_name, mode=f"w:{_comp_mode}", - **kwargs) - # add commonly reviewed files first, so that they can be more easily - # read from memory without needing to extract the whole archive - for _content in ['version.txt', 'sos_reports', 'sos_logs']: - if not os.path.exists(os.path.join(self._archive_root, _content)): - continue - tar.add( - os.path.join(self._archive_root, _content), - arcname=f"{self._name}/{_content}" - ) - # we need to pass the absolute path to the archive root but we - # want the names used in the archive to be relative. - tar.add(self._archive_root, arcname=self._name, - filter=self.copy_permissions_filter) - tar.close() + with tarfile.open(self._archive_name, mode=f"w:{_comp_mode}", + **kwargs) as tar: + # Add commonly reviewed files first, so that they can be more + # easily read from memory without needing to extract + # the whole archive + for _content in ['version.txt', 'sos_reports', 'sos_logs']: + if os.path.exists(os.path.join(self._archive_root, _content)): + tar.add( + os.path.join(self._archive_root, _content), + arcname=f"{self._name}/{_content}" + ) + # we need to pass the absolute path to the archive root but we + # want the names used in the archive to be relative. + tar.add(self._archive_root, arcname=self._name, + filter=self.copy_permissions_filter) self._suffix += f".{_comp_mode}" return self.name() diff --git a/sos/cleaner/__init__.py b/sos/cleaner/__init__.py index 070db544f0..e057936fab 100644 --- a/sos/cleaner/__init__.py +++ b/sos/cleaner/__init__.py @@ -141,7 +141,8 @@ def __init__(self, parser=None, args=None, cmdline=None, in_place=False, for _parser in self.opts.disable_parsers: for _loaded in self.parsers: - _loaded_name = _loaded.name.lower().split('parser')[0].strip() + _temp = _loaded.name.lower().split('parser', maxsplit=1)[0] + _loaded_name = _temp.strip() if _parser.lower().strip() == _loaded_name: self.log_info(f"Disabling parser: {_loaded_name}") self.ui_log.warning( @@ -206,7 +207,7 @@ def load_map_file(self): f"ERROR: map file {self.opts.map_file} does not exist, " "will not load any obfuscation matches") else: - with open(self.opts.map_file, 'r') as mf: + with open(self.opts.map_file, 'r', encoding='utf-8') as mf: try: _conf = json.load(mf) except json.JSONDecodeError: @@ -413,7 +414,8 @@ def execute(self): chksum_name = self.obfuscate_string( f"{arc_path.split('/')[-1]}.{self.hash_name}" ) - with open(os.path.join(self.sys_tmp, chksum_name), 'w') as cf: + with open(os.path.join(self.sys_tmp, chksum_name), 'w', + encoding='utf-8') as cf: cf.write(checksum) self.write_cleaner_log() @@ -455,7 +457,7 @@ def rebuild_nested_archive(self): if checksum is not None: dname = f"checksums/{arc_dest}.{self.hash_name}" self.archive.add_string(checksum, dest=dname) - for dirn, dirs, files in os.walk(self.nested_archive.extracted_path): + for dirn, _, files in os.walk(self.nested_archive.extracted_path): for filename in files: fname = os.path.join(dirn, filename) dname = fname.split(self.nested_archive.extracted_path)[-1] @@ -483,7 +485,7 @@ def write_map_to_file(self, _map, path): """Write the mapping to a file on disk that is in the same location as the final archive(s). 
""" - with open(path, 'w') as mf: + with open(path, 'w', encoding='utf-8') as mf: mf.write(json.dumps(_map, indent=4)) return path @@ -504,9 +506,8 @@ def write_map_for_config(self, _map): """ if self.opts.map_file and not self.opts.no_update: cleaner_dir = os.path.dirname(self.opts.map_file) - """ Attempt to create the directory /etc/sos/cleaner - just in case it didn't exist previously - """ + # Attempt to create the directory /etc/sos/cleaner + # just in case it didn't exist previously try: os.makedirs(cleaner_dir, exist_ok=True) self.write_map_to_file(_map, self.opts.map_file) @@ -522,7 +523,7 @@ def write_cleaner_log(self, archive=False): log_name = os.path.join( self.sys_tmp, f"{self.arc_name}-obfuscation.log" ) - with open(log_name, 'w') as logfile: + with open(log_name, 'w', encoding='utf-8') as logfile: self.sos_log_file.seek(0) for line in self.sos_log_file.readlines(): logfile.write(line) @@ -783,20 +784,21 @@ def obfuscate_file(self, filename, short_name=None, arc_name=None): return 0 self.log_debug(f"Obfuscating {short_name or filename}", caller=arc_name) - tfile = tempfile.NamedTemporaryFile(mode='w', dir=self.tmpdir) - with open(filename, 'r', errors='replace') as fname: - for line in fname: - try: - line, count = self.obfuscate_line(line, _parsers) - subs += count - tfile.write(line) - except Exception as err: - self.log_debug(f"Unable to obfuscate {short_name}: " - f"{err}", caller=arc_name) - tfile.seek(0) - if subs: - shutil.copyfile(tfile.name, filename) - tfile.close() + with tempfile.NamedTemporaryFile(mode='w', dir=self.tmpdir) \ + as tfile: + with open(filename, 'r', encoding='utf-8', + errors='replace') as fname: + for line in fname: + try: + line, count = self.obfuscate_line(line, _parsers) + subs += count + tfile.write(line) + except Exception as err: + self.log_debug(f"Unable to obfuscate {short_name}:" + f"{err}", caller=arc_name) + tfile.seek(0) + if subs: + shutil.copyfile(tfile.name, filename) _ob_short_name = self.obfuscate_string(short_name.split('/')[-1]) _ob_filename = short_name.replace(short_name.split('/')[-1], diff --git a/sos/cleaner/archives/__init__.py b/sos/cleaner/archives/__init__.py index 35dc7b8e91..a70ae5d5ed 100644 --- a/sos/cleaner/archives/__init__.py +++ b/sos/cleaner/archives/__init__.py @@ -23,17 +23,27 @@ # process for extraction if this method is a part of the SoSObfuscationArchive # class. So, the simplest solution is to remove it from the class. 
 def extract_archive(archive_path, tmpdir):
-    archive = tarfile.open(archive_path)
-    path = os.path.join(tmpdir, 'cleaner')
-    # set extract filter since python 3.12 (see PEP-706 for more)
-    # Because python 3.10 and 3.11 raises false alarms as exceptions
-    # (see #3330 for examples), we can't use data filter but must
-    # fully trust the archive (legacy behaviour)
-    archive.extraction_filter = getattr(tarfile, 'fully_trusted_filter',
-                                        (lambda member, path: member))
-    archive.extractall(path)
-    archive.close()
-    return os.path.join(path, archive.name.split('/')[-1].split('.tar')[0])
+    with tarfile.open(archive_path) as archive:
+        path = os.path.join(tmpdir, 'cleaner')
+        # set extract filter since python 3.12 (see PEP-706 for more)
+        # Because python 3.10 and 3.11 raises false alarms as exceptions
+        # (see #3330 for examples), we can't use data filter but must
+        # fully trust the archive (legacy behaviour)
+        archive.extraction_filter = getattr(tarfile, 'fully_trusted_filter',
+                                            (lambda member, path: member))
+
+        # Guard against "Arbitrary file write during tarfile extraction":
+        # check that extracted files don't stray out of the target directory.
+        for member in archive.getmembers():
+            member_path = os.path.join(path, member.name)
+            abs_directory = os.path.abspath(path)
+            abs_target = os.path.abspath(member_path)
+            prefix = os.path.commonprefix([abs_directory, abs_target])
+            if prefix != abs_directory:
+                raise Exception("Attempted path traversal in tarfile: "
+                                f"{prefix} != {abs_directory}")
+            archive.extract(member, path)
+    return os.path.join(path, archive.name.split('/')[-1].split('.tar')[0])


 class SoSObfuscationArchive():
@@ -71,7 +81,7 @@ def __init__(self, archive_path, tmpdir):
     @classmethod
     def check_is_type(cls, arc_path):
         """Check if the archive is a well-known type we directly support"""
-        return False
+        raise NotImplementedError

     @property
     def is_sos(self):
@@ -83,6 +93,7 @@ def is_insights(self):

     def _load_self(self):
         if self.is_tarfile:
+            # pylint: disable=consider-using-with
             self.tarobj = tarfile.open(self.archive_path)

     def get_nested_archives(self):
@@ -102,8 +113,7 @@ def get_archive_root(self):
             toplevel = self.tarobj.firstmember
             if toplevel.isdir():
                 return toplevel.name
-            else:
-                return os.path.dirname(toplevel.name) or os.sep
+            return os.path.dirname(toplevel.name) or os.sep
         return os.path.abspath(self.archive_path)

     def report_msg(self, msg):
@@ -160,8 +170,7 @@ def format_file_name(self, fname):
             if not self.archive_root:
                 self.archive_root = self.get_archive_root()
             return os.path.join(self.archive_root, fname)
-        else:
-            return os.path.join(self.extracted_path, fname)
+        return os.path.join(self.extracted_path, fname)

     def get_file_content(self, fname):
         """Return the content from the specified fname.
Particularly useful for @@ -179,7 +188,8 @@ def get_file_content(self, fname): return '' else: try: - with open(self.format_file_name(fname), 'r') as to_read: + with open(self.format_file_name(fname), 'r', + encoding='utf-8') as to_read: return to_read.read() except Exception as err: self.log_debug(f"Failed to get contents of {fname}: {err}") @@ -257,10 +267,9 @@ def build_tar_file(self, method): else: compr_args = {'compresslevel': 6} self.log_debug(f"Building tar file {tarpath}") - tar = tarfile.open(tarpath, mode=mode, **compr_args) - tar.add(self.extracted_path, - arcname=os.path.split(self.archive_name)[1]) - tar.close() + with tarfile.open(tarpath, mode=mode, **compr_args) as tar: + tar.add(self.extracted_path, + arcname=os.path.split(self.archive_name)[1]) return tarpath def compress(self, method): @@ -284,15 +293,15 @@ def remove_extracted_path(self): so that we don't take up that duplicate space any longer during execution """ - def force_delete_file(action, name, exc): - os.chmod(name, stat.S_IWUSR) - if os.path.isfile(name): - os.remove(name) + try: + self.log_debug(f"Removing {self.extracted_path}") + shutil.rmtree(self.extracted_path) + except OSError: + os.chmod(self.extracted_path, stat.S_IWUSR) + if os.path.isfile(self.extracted_path): + os.remove(self.extracted_path) else: - shutil.rmtree(name) - self.log_debug(f"Removing {self.extracted_path}") - # pylint: disable-next=deprecated-argument - shutil.rmtree(self.extracted_path, onerror=force_delete_file) + shutil.rmtree(self.extracted_path) def extract_self(self): """Extract an archive into our tmpdir so that we may inspect it or @@ -323,18 +332,16 @@ def get_file_list(self): Will not include symlinks, as those are handled separately """ - for dirname, dirs, files in os.walk(self.extracted_path): + for dirname, _, files in os.walk(self.extracted_path): for filename in files: _fname = os.path.join(dirname, filename.lstrip('/')) - if os.path.islink(_fname): - continue - else: + if not os.path.islink(_fname): yield _fname def get_directory_list(self): """Return a list of all directories within the archive""" dir_list = [] - for dirname, dirs, files in os.walk(self.extracted_path): + for dirname, _, _ in os.walk(self.extracted_path): dir_list.append(dirname) return dir_list diff --git a/sos/cleaner/mappings/__init__.py b/sos/cleaner/mappings/__init__.py index 3bcef06ded..fc4d75c132 100644 --- a/sos/cleaner/mappings/__init__.py +++ b/sos/cleaner/mappings/__init__.py @@ -125,13 +125,13 @@ def get(self, item): return self.add(item) return self.dataset[item] - def conf_update(self, map_dict): + def conf_update(self, config): """Update the map using information from a previous run to ensure that we have consistent obfuscation between reports Positional arguments: - :param map_dict: A dict of mappings with the form of - {clean_entry: 'obfuscated_entry'} + :param config: A dict of mappings with the form of + {clean_entry: 'obfuscated_entry'} """ - self.dataset.update(map_dict) + self.dataset.update(config) diff --git a/sos/cleaner/mappings/hostname_map.py b/sos/cleaner/mappings/hostname_map.py index a19fd2a816..2f189100f1 100644 --- a/sos/cleaner/mappings/hostname_map.py +++ b/sos/cleaner/mappings/hostname_map.py @@ -73,12 +73,10 @@ def load_domains_from_map(self): _domain_to_inject = '.'.join(domain.split('.')[1:-1]) if not _domain_to_inject: continue - for existing_domain in self.dataset.keys(): + for existing_domain, value in self.dataset.items(): _existing = '.'.join(existing_domain.split('.')[:-1]) if _existing == 
_domain_to_inject: - _ob_domain = '.'.join( - self.dataset[existing_domain].split('.')[:-1] - ) + _ob_domain = '.'.join(value.split('.')[:-1]) self._domains[_domain_to_inject] = _ob_domain self.set_initial_counts() @@ -122,7 +120,7 @@ def domain_name_in_loaded_domains(self, domain): if len(host) == 1: # don't block on host's shortname return host[0] in self.hosts - elif any([no_tld.endswith(_d) for _d in self._domains]): + if any(no_tld.endswith(_d) for _d in self._domains): return True return False @@ -153,7 +151,7 @@ def get(self, item): ext = '.' + item.split('.')[-1] item = item.replace(ext, '') suffix += ext - if item not in self.dataset.keys(): + if item not in self.dataset: # try to account for use of '-' in names that include hostnames # and don't create new mappings for each of these for _existing in sorted(self.dataset.keys(), reverse=True, @@ -163,17 +161,17 @@ def get(self, item): _h = _existing.split('.') # avoid considering a full FQDN match as a new match off of # the hostname of an existing match - if _h[0] and _h[0] in self.hosts.keys(): + if _h[0] and _h[0] in self.hosts: _host_substr = True if len(_test) == 1 or not _test[0]: # does not match existing obfuscation continue - elif not _host_substr and (_test[0].endswith('.') or - item.endswith(_existing)): + if not _host_substr and (_test[0].endswith('.') or + item.endswith(_existing)): # new hostname in known domain final = super().get(item) break - elif item.split(_test[0]): + if item.split(_test[0]): # string that includes existing FQDN obfuscation substring # so, only obfuscate the FQDN part try: @@ -196,7 +194,7 @@ def sanitize_item(self, item): if len(host) == 2: # we have just a domain name, e.g. example.com dname = self.sanitize_domain(host) - if all([h.isupper() for h in host]): + if all(h.isupper() for h in host): dname = dname.upper() return dname if len(host) > 2: @@ -215,7 +213,7 @@ def sanitize_item(self, item): ob_domain = self.sanitize_domain(domain) self.dataset[item] = ob_domain _fqdn = '.'.join([ob_hostname, ob_domain]) - if all([h.isupper() for h in host]): + if all(h.isupper() for h in host): _fqdn = _fqdn.upper() return _fqdn return None diff --git a/sos/cleaner/mappings/ip_map.py b/sos/cleaner/mappings/ip_map.py index 11d779b5a3..be6230da5c 100644 --- a/sos/cleaner/mappings/ip_map.py +++ b/sos/cleaner/mappings/ip_map.py @@ -54,34 +54,34 @@ def ip_in_dataset(self, ipaddr): already created """ for _ip in self.dataset.values(): - if str(ipaddr).split('/')[0] == _ip.split('/')[0]: + if str(ipaddr).split('/', maxsplit=1)[0] == _ip.split('/')[0]: return True return False - def get(self, ipaddr): + def get(self, item): """Ensure that when requesting an obfuscated address, we return a str object instead of an IPv(4|6)Address object """ filt_start = ('/', '=', ']', ')') - if ipaddr.startswith(filt_start): - ipaddr = ipaddr.lstrip(''.join(filt_start)) + if item.startswith(filt_start): + item = item.lstrip(''.join(filt_start)) - if ipaddr in self.dataset.keys(): - return self.dataset[ipaddr] + if item in self.dataset: + return self.dataset[item] - if self.ignore_item(ipaddr) or self.ip_in_dataset(ipaddr): - return ipaddr + if self.ignore_item(item) or self.ip_in_dataset(item): + return item # it's not in there, but let's make sure we haven't previously added # an address with a CIDR notation and we're now looking for it without # that notation - if '/' not in ipaddr: - for key in self.dataset.keys(): - if key.startswith(ipaddr): - return self.dataset[key].split('/')[0] + if '/' not in item: + for key, value in 
self.dataset.items(): + if key.startswith(item): + return value.split('/')[0] # fallback to the default map behavior of adding it fresh - return self.add(ipaddr) + return self.add(item) def set_ip_cidr_from_existing_subnet(self, addr): """Determine if a given address is in a subnet of an already obfuscated @@ -164,7 +164,7 @@ def sanitize_ipaddr(self, addr): def _new_obfuscated_single_address(self): def _gen_address(): _octets = [] - for i in range(0, 4): + for _ in range(0, 4): _octets.append(random.randint(11, 99)) return f"{_octets[0]}.{_octets[1]}.{_octets[2]}.{_octets[3]}" diff --git a/sos/cleaner/mappings/ipv6_map.py b/sos/cleaner/mappings/ipv6_map.py index d287151368..10b1cbc604 100644 --- a/sos/cleaner/mappings/ipv6_map.py +++ b/sos/cleaner/mappings/ipv6_map.py @@ -84,9 +84,9 @@ def conf_update(self, config): _net.add_obfuscated_host_address(host, _ob_host) self.dataset[host] = _ob_host - def sanitize_item(self, ipaddr): - _prefix = ipaddr.split('/')[-1] if '/' in ipaddr else '' - _ipaddr = ipaddr + def sanitize_item(self, item): + _prefix = item.split('/')[-1] if '/' in item else '' + _ipaddr = item if not _prefix: # assume a /64 default per protocol _ipaddr += "/64" @@ -186,13 +186,13 @@ def _obfuscate_network_address(self): """ if self.addr.is_global: return self._obfuscate_global_address() - elif self.addr.is_link_local: + if self.addr.is_link_local: # link-local addresses are always fe80::/64. This is not sensitive # in itself, and retaining the information that an address is a # link-local address is important for problem analysis, so don't # obfuscate this network information. return self.network_addr - elif self.addr.is_private: + if self.addr.is_private: return self._obfuscate_private_address() return self.network_addr @@ -256,19 +256,19 @@ def obfuscate_host_address(self, addr): :returns: An obfuscated address within this network :rtype: ``str`` """ - def _generate_address(): + def _generate_address(host): return ''.join([ self._obfuscated_network, - ':'.join(generate_hextets(_host.split(':'))) + ':'.join(generate_hextets(host.split(':'))) ]) if addr.compressed not in self.hosts: # separate host from the address by removing its network prefix _n = self.network_addr.rstrip(':') _host = addr.compressed[len(_n):].lstrip(':') - _ob_host = _generate_address() + _ob_host = _generate_address(_host) while _ob_host in self.hosts.values(): - _ob_host = _generate_address() + _ob_host = _generate_address(_host) self.add_obfuscated_host_address(addr.compressed, _ob_host) return self.hosts[addr.compressed] diff --git a/sos/cleaner/mappings/keyword_map.py b/sos/cleaner/mappings/keyword_map.py index cbeb0c4e59..716c2716d1 100644 --- a/sos/cleaner/mappings/keyword_map.py +++ b/sos/cleaner/mappings/keyword_map.py @@ -25,6 +25,8 @@ class SoSKeywordMap(SoSMap): word_count = 0 def sanitize_item(self, item): + if item in self.dataset: + return self.dataset[item] _ob_item = f"obfuscatedword{self.word_count}" self.word_count += 1 if _ob_item in self.dataset.values(): diff --git a/sos/cleaner/mappings/mac_map.py b/sos/cleaner/mappings/mac_map.py index 464344abe9..cf6744e225 100644 --- a/sos/cleaner/mappings/mac_map.py +++ b/sos/cleaner/mappings/mac_map.py @@ -64,7 +64,7 @@ def sanitize_item(self, item): """ hexdigits = "0123456789abdcef" hextets = [] - for i in range(0, 3): + for _ in range(0, 3): hextets.append(''.join(random.choice(hexdigits) for x in range(2))) hextets = tuple(hextets) diff --git a/sos/cleaner/mappings/username_map.py b/sos/cleaner/mappings/username_map.py index 
f03dd510e3..f76db4e993 100644 --- a/sos/cleaner/mappings/username_map.py +++ b/sos/cleaner/mappings/username_map.py @@ -24,11 +24,11 @@ class SoSUsernameMap(SoSMap): match_full_words_only = True name_count = 0 - def sanitize_item(self, username): + def sanitize_item(self, item): """Obfuscate a new username not currently found in the map """ ob_name = f"obfuscateduser{self.name_count}" self.name_count += 1 if ob_name in self.dataset.values(): - return self.sanitize_item(username.lower()) + return self.sanitize_item(item.lower()) return ob_name diff --git a/sos/cleaner/parsers/ipv6_parser.py b/sos/cleaner/parsers/ipv6_parser.py index 2db054d113..c8d37dbaee 100644 --- a/sos/cleaner/parsers/ipv6_parser.py +++ b/sos/cleaner/parsers/ipv6_parser.py @@ -47,8 +47,7 @@ def get_map_contents(self): 'version': self.mapping.version, 'networks': {} } - for net in self.mapping.networks: - _net = self.mapping.networks[net] + for _, _net in self.mapping.networks.items(): _d['networks'][_net.original_address] = { 'obfuscated': _net.obfuscated_address, 'hosts': {} diff --git a/sos/cleaner/preppers/keywords.py b/sos/cleaner/preppers/keywords.py index 9baf86a2a8..8396151d92 100644 --- a/sos/cleaner/preppers/keywords.py +++ b/sos/cleaner/preppers/keywords.py @@ -21,12 +21,13 @@ class KeywordPrepper(SoSPrepper): name = 'keyword' + # pylint: disable=unused-argument def _get_items_for_keyword(self, archive): items = [] for kw in self.opts.keywords: items.append(kw) if self.opts.keyword_file and os.path.exists(self.opts.keyword_file): - with open(self.opts.keyword_file, 'r') as kwf: + with open(self.opts.keyword_file, 'r', encoding='utf-8') as kwf: items.extend(kwf.read().splitlines()) for item in items: diff --git a/sos/cleaner/preppers/mac.py b/sos/cleaner/preppers/mac.py index 75f6607631..55d0de5f32 100644 --- a/sos/cleaner/preppers/mac.py +++ b/sos/cleaner/preppers/mac.py @@ -21,7 +21,7 @@ class MacPrepper(SoSPrepper): def _get_mac_file_list(self, archive): if archive.is_sos: return ['sos_commands/networking/ip_-d_address'] - elif archive.is_insights: + if archive.is_insights: return ['data/insights_commands/ip_addr'] return [] diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py index fb39a56e46..5eeba25702 100644 --- a/sos/collector/__init__.py +++ b/sos/collector/__init__.py @@ -169,9 +169,9 @@ def __init__(self, parser, parsed_args, cmdline_args): # get the local hostname and addresses to filter from results later self.hostname = socket.gethostname() try: - self.ip_addrs = list(set([ + self.ip_addrs = list({ i[4][0] for i in socket.getaddrinfo(socket.gethostname(), None) - ])) + }) except Exception: # this is almost always a DNS issue with reverse resolution # set a safe fallback and log the issue @@ -195,8 +195,6 @@ def __init__(self, parser, parsed_args, cmdline_args): except KeyboardInterrupt: self.exit('Exiting on user cancel', 130) - except Exception: - raise def load_clusters(self): """Loads all cluster types supported by the local installation for @@ -237,7 +235,7 @@ def _find_modules_in_path(cls, path, modulename): continue if '__' in pyfile: continue - fname, ext = os.path.splitext(pyfile) + fname, _ = os.path.splitext(pyfile) modname = f'sos.collector.{modulename}.{fname}' modules.extend(cls._import_modules(modname)) return modules @@ -253,7 +251,7 @@ def _import_modules(cls, modname): f' {e.__class__.__name__}') raise e modules = inspect.getmembers(module, inspect.isclass) - for mod in modules: + for mod in modules.copy(): if mod[0] in ('SosHost', 'Cluster'): modules.remove(mod) 
return modules @@ -522,9 +520,9 @@ def display_help(cls, section): section.add_text( 'The following help sections may be of further interest:\n' ) - for hsec in hsections: + for hsec, value in hsections.items(): section.add_text( - f"{' ':>8}{bold(hsec):<40}{hsections[hsec]:<30}", + f"{' ':>8}{bold(hsec):<40}{value:<30}", newline=False ) @@ -564,7 +562,7 @@ def _parse_options(self): """ self.commons = { 'cmdlineopts': self.opts, - 'need_sudo': True if self.opts.ssh_user != 'root' else False, + 'need_sudo': self.opts.ssh_user != 'root', 'tmpdir': self.tmpdir, 'hostlen': max(len(self.opts.primary), len(self.hostname)), 'policy': self.policy @@ -596,8 +594,8 @@ def verify_cluster_options(self): if self.opts.cluster_options: for opt in self.opts.cluster_options: match = False - for clust in self.clusters: - for option in self.clusters[clust].options: + for clust, value in self.clusters.items(): + for option in value.options: if opt.name == option.name and opt.cluster == clust: match = True opt.value = self._validate_option(option, opt) @@ -619,18 +617,13 @@ def _validate_option(self, default, cli): msg = "Invalid option type for %s. Expected %s got %s" self.exit(msg % (cli.name, default.opt_type, cli.opt_type), 1) return cli.value - else: - val = cli.value.lower() - if val not in ['true', 'on', 'yes', 'false', 'off', 'no']: - msg = ("Invalid value for %s. Accepted values are: 'true', " - "'false', 'on', 'off', 'yes', 'no'.") - self.exit(msg % cli.name, 1) - else: - if val in ['true', 'on', 'yes']: - return True - else: - return False - self.exit(f"Unknown option type: {cli.opt_type}") + + val = cli.value.lower() + if val not in ['true', 'on', 'yes', 'false', 'off', 'no']: + msg = ("Invalid value for %s. Accepted values are: 'true', " + "'false', 'on', 'off', 'yes', 'no'.") + self.exit(msg % cli.name, 1) + return val in ['true', 'on', 'yes'] def log_info(self, msg): """Log info messages to both console and log file""" @@ -663,9 +656,9 @@ def list_options(self): ) _opts = {} - for _cluster in self.clusters: - for opt in self.clusters[_cluster].options: - if opt.name not in _opts.keys(): + for _, value in self.clusters.items(): + for opt in value.options: + if opt.name not in _opts: _opts[opt.name] = opt else: for clust in opt.cluster: @@ -753,7 +746,7 @@ def _load_group_config(self): self.log_debug(f"Loading host group {fname}") - with open(fname, 'r') as hf: + with open(fname, 'r', encoding='utf-8') as hf: _group = json.load(hf) for key in ['primary', 'cluster_type']: if _group[key]: @@ -776,7 +769,7 @@ def write_host_group(self): 'name': self.opts.save_group, 'primary': self.opts.primary, 'cluster_type': self.cluster.cluster_type[0], - 'nodes': [n for n in self.node_list] + 'nodes': list(self.node_list) } if os.getuid() != 0: group_path = os.path.join(Path.home(), '.config/sos/groups.d') @@ -785,7 +778,7 @@ def write_host_group(self): else: group_path = COLLECTOR_CONFIG_DIR fname = os.path.join(group_path, cfg['name']) - with open(fname, 'w') as hf: + with open(fname, 'w', encoding='utf-8') as hf: json.dump(cfg, hf) os.chmod(fname, 0o600) return fname @@ -1072,7 +1065,7 @@ def reduce_node_list(self): # an open session to it. 
if self.primary is not None and not self.cluster.strict_node_list: for n in self.node_list: - if n == self.primary.hostname or n == self.opts.primary: + if n in (self.primary.hostname, self.opts.primary): self.node_list.remove(n) self.node_list = list(set(n for n in self.node_list if n)) self.log_debug(f'Node list reduced to {self.node_list}') @@ -1165,7 +1158,7 @@ def intro(self): """Print the intro message and prompts for a case ID if one is not provided on the command line """ - disclaimer = ("""\ + disclaimer = """\ This utility is used to collect sos reports from multiple \ nodes simultaneously. Remote connections are made and/or maintained \ to those nodes via well-known transport protocols such as SSH. @@ -1179,7 +1172,7 @@ def intro(self): No configuration changes will be made to the system running \ this utility or remote systems that it connects to. -""") +""" self.ui_log.info(f"\nsos collect (version {__version__})\n") intro_msg = self._fmt_msg(disclaimer % self.tmpdir) self.ui_log.info(intro_msg) @@ -1288,7 +1281,7 @@ def collect(self): self.log_info(msg % (self.retrieved, self.report_num)) self.close_all_connections() if self.retrieved > 0: - arc_name = self.create_cluster_archive() + self.arc_name = self.create_cluster_archive() else: msg = 'No sos reports were collected, nothing to archive...' self.exit(msg, 1) @@ -1296,7 +1289,7 @@ def collect(self): if (self.opts.upload and self.policy.get_upload_url()) or \ self.opts.upload_s3_endpoint: try: - self.policy.upload_archive(arc_name) + self.policy.upload_archive(self.arc_name) self.ui_log.info("Uploaded archive successfully") except Exception as err: self.ui_log.error(f"Upload attempt failed: {err}") @@ -1427,3 +1420,5 @@ def create_cluster_archive(self): msg = (f"Could not finalize archive: {err}\n\nData may still be " f"available uncompressed at {self.archive_path}") self.exit(msg, 2) + # Never gets here. 
This is to fix "inconsistent-return-statements"
+        return "Archive error"
diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py
index e66233cde2..eee010c558 100644
--- a/sos/collector/clusters/__init__.py
+++ b/sos/collector/clusters/__init__.py
@@ -132,7 +132,7 @@ def display_help(cls, section):  # pylint: disable=too-many-branches
         if cls.sos_plugin_options:
             _opts = cls.sos_plugin_options
-            opts = ', '.join(f"{opt}={_opts[opt]}" for opt in _opts)
+            opts = ', '.join(f"{k}={v}" for k, v in _opts.items())
             section.add_text(
                 f"Sets the following plugin options: {opts}",
                 newline=False
@@ -260,7 +260,6 @@ def set_node_options(self, node):
         :param node: The non-primary node
         :type node: ``SoSNode``
         """
-        pass

     def set_transport_type(self):
         """The default connection type used by sos collect is to leverage the
@@ -280,7 +279,6 @@ def set_primary_options(self, node):
         :param node: The primary node
         :type node: ``SoSNode``
         """
-        pass

     def check_node_is_primary(self, node):
         """In the event there are multiple primaries, or if the collect command
@@ -324,7 +322,6 @@ def setup(self):
         extra commands to be run even if a node list is given by the user, and
         thus get_nodes() would not be called
         """
-        pass

     def check_enabled(self):
         """
@@ -352,7 +349,6 @@ def cleanup(self):
         This helps ensure that sos does make lasting changes to the environment
         in which we are running
         """
-        pass

     def get_nodes(self):
         """
@@ -364,7 +360,7 @@ def get_nodes(self):
         :returns: A list of node FQDNs or IP addresses
         :rtype: ``list`` or ``None``
         """
-        pass
+        raise NotImplementedError

     def _get_nodes(self):
         try:
@@ -388,7 +384,7 @@ def get_node_label(self, node):
         node.manifest.add_field('label', label)
         return label

-    def set_node_label(self, node):
+    def set_node_label(self, node):  # pylint: disable=unused-argument
         """This may be overridden by clusters profiles subclassing this class

         If there is a distinction between primaries and nodes, or types of
@@ -407,7 +403,8 @@ def format_node_list(self):
         try:
             nodes = self.get_nodes()
         except Exception as err:
-            raise Exception(f"Cluster failed to enumerate nodes: {err}")
+            raise Exception(f"Cluster failed to enumerate nodes: {err}") \
+                from err
         if isinstance(nodes, list):
             node_list = [n.strip() for n in nodes if n]
         elif isinstance(nodes, str):
diff --git a/sos/collector/clusters/juju.py b/sos/collector/clusters/juju.py
index a8ef68fb3d..898bdd925d 100644
--- a/sos/collector/clusters/juju.py
+++ b/sos/collector/clusters/juju.py
@@ -89,9 +89,9 @@ def add_subordinates(self, juju_status):
                 continue

             units = juju_status["applications"][parent]["units"]
-            for unit, unit_info in units.items():
+            for _, unit_info in units.items():
                 node = f"{self.model_name}:{unit_info['machine']}"
-                for sub_key, sub_value in unit_info.get(
+                for sub_key, _ in unit_info.get(
                     "subordinates", {}
                 ).items():
                     if sub_key.startswith(app + "/"):
@@ -176,13 +176,7 @@ def _execute_juju_status(self, model_name):
         juju_json_output = self._cleanup_juju_output((res["output"]))

         juju_status = None
-        try:
-            juju_status = json.loads(juju_json_output)
-        except json.JSONDecodeError:
-            raise Exception(
-                "Juju output is not valid json format."
- f"Output: {juju_json_output}" - ) + juju_status = json.loads(juju_json_output) return juju_status def _filter_by_pattern(self, key, patterns, model_info): diff --git a/sos/collector/clusters/kubernetes.py b/sos/collector/clusters/kubernetes.py index 50f7fa4206..f8c715b6df 100644 --- a/sos/collector/clusters/kubernetes.py +++ b/sos/collector/clusters/kubernetes.py @@ -47,7 +47,6 @@ def get_nodes(self): if node[2] in roles: nodes.append(node[0]) return nodes - else: - raise Exception('Node enumeration did not return usable output') + raise Exception('Node enumeration did not return usable output') # vim: set et ts=4 sw=4 : diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py index d4af8747a1..265c1e4dc9 100644 --- a/sos/collector/clusters/ocp.py +++ b/sos/collector/clusters/ocp.py @@ -232,8 +232,8 @@ def _build_dict(self, nodelist): for node in nodelist: _node = node.split() nodes[_node[0]] = {} - for column in idx: - nodes[_node[0]][column] = _node[idx[column]] + for column, value in idx.items(): + nodes[_node[0]][column] = _node[value] return nodes def set_transport_type(self): @@ -264,7 +264,7 @@ def get_nodes(self): self.log_warn("NOTE: By default, only master nodes are listed." "\nTo collect from all/more nodes, override the " "role option with '-c ocp.role=role1:role2'") - roles = [r for r in self.get_option('role').split(':')] + roles = list(self.get_option('role').split(':')) self.node_dict = self._build_dict(res['output'].splitlines()) for node_name, node in self.node_dict.items(): if roles: @@ -290,10 +290,10 @@ def set_node_label(self, node): return label return '' - def check_node_is_primary(self, sosnode): - if sosnode.address not in self.node_dict: + def check_node_is_primary(self, node): + if node.address not in self.node_dict: return False - return 'master' in self.node_dict[sosnode.address]['roles'] + return 'master' in self.node_dict[node.address]['roles'] def _toggle_api_opt(self, node, use_api): """In earlier versions of sos, the openshift plugin option that is @@ -368,7 +368,7 @@ def set_primary_options(self, node): elif node.file_exists(_kubeconfig): # if the file exists, then the openshift sos plugin will use it # if the with-api option is turned on - if not _kubeconfig == master_kube: + if _kubeconfig != master_kube: node.plugopts.append( f"openshift.kubeconfig={_kubeconfig}" ) diff --git a/sos/collector/clusters/openstack.py b/sos/collector/clusters/openstack.py index c20ec069b1..47adf719cb 100644 --- a/sos/collector/clusters/openstack.py +++ b/sos/collector/clusters/openstack.py @@ -56,7 +56,7 @@ def get_nodes(self): _inv = yaml.safe_load(self.primary.read_file(INVENTORY)) except Exception as err: self.log_info(f"Error parsing yaml: {err}") - raise Exception("Could not parse yaml for node addresses") + raise Exception("Could not parse yaml for node addresses") from err try: for _t in ['Controller', 'Compute']: # fields are titled in the yaml, but our opts are lowercase diff --git a/sos/collector/clusters/ovirt.py b/sos/collector/clusters/ovirt.py index 1d358a9c02..90cb85161b 100644 --- a/sos/collector/clusters/ovirt.py +++ b/sos/collector/clusters/ovirt.py @@ -127,9 +127,7 @@ def get_nodes(self): if res['status'] == 0: nodes = res['output'].splitlines()[2:-1] return [n.split('(')[0].strip() for n in nodes] - else: - raise Exception('database query failed, return code: ' - f'{res["status"]}') + raise Exception(f'database query failed, return code: {res["status"]}') def run_extra_cmd(self): if not self.get_option('no-database') and self.conf: 
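The 'raise ... from err' additions in the cluster profiles above pair with dropping W0707 (raise-missing-from) from the pylintrc disable list earlier in this patch. A minimal standalone sketch of the pattern; the function and message are illustrative, not taken from sos:

    import json

    def parse_status(raw):
        try:
            return json.loads(raw)
        except json.JSONDecodeError as err:
            # "from err" chains the original JSONDecodeError onto the new
            # exception, so the traceback shows both causes.
            raise Exception("Could not parse status output") from err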
@@ -188,8 +186,7 @@ def set_node_label(self, node):
             return 'manager'
         if node.is_installed('ovirt-node-ng-nodectl'):
             return 'rhvh'
-        else:
-            return 'rhelh'
+        return 'rhelh'


 class rhhi_virt(rhv):
diff --git a/sos/collector/clusters/pacemaker.py b/sos/collector/clusters/pacemaker.py
index 6c3ee30213..824ae930d2 100644
--- a/sos/collector/clusters/pacemaker.py
+++ b/sos/collector/clusters/pacemaker.py
@@ -63,7 +63,7 @@ def get_nodes_from_crm(self):
         _ver = self.exec_primary_cmd('crm_mon --version')
         if _ver['status'] == 0:
             cver = _ver['output'].split()[1].split('-')[0]
-            if not sos_parse_version(cver) > sos_parse_version('2.0.3'):
+            if sos_parse_version(cver) <= sos_parse_version('2.0.3'):
                 xmlopt = '--as-xml'
         else:
             return
diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
index ec390b4385..0a3effaa4d 100644
--- a/sos/collector/sosnode.py
+++ b/sos/collector/sosnode.py
@@ -106,6 +106,7 @@ def __init__(self, address, commons, password=None, local_sudo=None,
             if self.host.containerized:
                 self.create_sos_container()
             self._load_sos_info()
+        return None

     @property
     def connected(self):
@@ -126,9 +127,9 @@ def _load_remote_transport(self, commons):
         if self.address in ['localhost', '127.0.0.1']:
             self.local = True
             return LocalTransport(self.address, commons)
-        elif self.opts.transport in TRANSPORTS.keys():
+        if self.opts.transport in TRANSPORTS:
             return TRANSPORTS[self.opts.transport](self.address, commons)
-        elif self.opts.transport != 'auto':
+        if self.opts.transport != 'auto':
             self.log_error(
                 "Connection failed: unknown or unsupported transport "
                 f"{self.opts.transport}"
@@ -159,7 +160,7 @@ def set_node_manifest(self, manifest):
         """
         self.manifest = manifest
         self.manifest.add_field('hostname', self._hostname)
-        self.manifest.add_field('policy', self.host.distro)
+        self.manifest.add_field('policy', self.host.os_release_name)
         self.manifest.add_field('sos_version', self.sos_info['version'])
         self.manifest.add_field('final_sos_command', '')
         self.manifest.add_field('transport', self._transport.name)
@@ -194,7 +195,7 @@ def create_sos_container(self):
                     "and password or authfile"
                 )
                 raise Exception
-            elif 'unknown: Not found' in res['output']:
+            if 'unknown: Not found' in res['output']:
                 self.log_error('Specified image not found on registry')
                 raise Exception
             # 'name exists' with code 125 means the container was
@@ -207,14 +208,13 @@ def create_sos_container(self):
                     self.log_info("Temporary container "
                                   f"{self.host.sos_container_name} created")
                     return True
-                else:
-                    self.log_error("Could not start container after create: "
-                                   f"{ret['output']}")
-                    raise Exception
-            else:
-                self.log_error("Could not create container on host: "
-                               f"{res['output']}")
+                self.log_error("Could not start container after create: "
+                               f"{ret['output']}")
                 raise Exception
+
+            self.log_error("Could not create container on host: "
+                           f"{res['output']}")
+            raise Exception
         return False

     def get_container_auth(self):
@@ -226,10 +226,9 @@ def get_container_auth(self):
         if self.opts.registry_user:
             return self.host.runtimes['default'].fmt_registry_credentials(
                 self.opts.registry_user,
                 self.opts.registry_password
             )
-        else:
-            return self.host.runtimes['default'].fmt_registry_authfile(
-                self.opts.registry_authfile or self.host.container_authfile
-            )
+        return self.host.runtimes['default'].fmt_registry_authfile(
+            self.opts.registry_authfile or self.host.container_authfile
+        )

     def file_exists(self, fname, need_root=False):
         """Checks for the presence of fname on the remote node"""
@@ -391,14 +390,14 @@ def determine_host_policy(self):
         """
         if self.local:
             self.log_info(
-                f"using local policy {self.commons['policy'].distro}")
+                f"using local policy {self.commons['policy'].os_release_name}")
             return self.commons['policy']
         host = load(cache={}, sysroot=self.opts.sysroot, init=InitSystem(),
                     probe_runtime=True,
                     remote_exec=self._transport.run_command,
                     remote_check=self.read_file('/etc/os-release'))
         if host:
-            self.log_info(f"loaded policy {host.distro} for host")
+            self.log_info(f"loaded policy {host.os_release_name} for host")
             return host
         self.log_error('Unable to determine host installation. Ignoring node')
         raise UnsupportedHostException
@@ -751,8 +750,7 @@ def determine_sos_error(self, rc, stdout):
             return 'sos report terminated unexpectedly. Check disk space'
         if len(stdout) > 0:
             return stdout.split('\n')[0:1]
-        else:
-            return f'sos exited with code {rc}'
+        return f'sos exited with code {rc}'

     def execute_sos_command(self):
         """Run sos report and capture the resulting file path"""
@@ -807,10 +805,9 @@ def retrieve_file(self, path):
             if self.file_exists(path):
                 self.log_info(f"Copying remote {path} to local {destdir}")
                 return self._transport.retrieve_file(path, dest)
-            else:
-                self.log_debug(f"Attempting to copy remote file {path}, but it"
-                               " does not exist on filesystem")
-                return False
+            self.log_debug(f"Attempting to copy remote file {path}, but it"
+                           " does not exist on filesystem")
+            return False
         except Exception as err:
             self.log_debug(f"Failed to retrieve {path}: {err}")
             return False
@@ -830,10 +827,9 @@ def remove_file(self, path):
                 cmd = f"rm -f {path}"
                 res = self.run_command(cmd, need_root=True)
                 return res['status'] == 0
-            else:
-                self.log_debug(f"Attempting to remove remote file {path}, but "
-                               "it does not exist on filesystem")
-                return False
+            self.log_debug(f"Attempting to remove remote file {path}, but "
+                           "it does not exist on filesystem")
+            return False
         except Exception as e:
             self.log_debug(f'Failed to remove {path}: {e}')
             return False
@@ -857,9 +853,8 @@ def retrieve_sosreport(self):
             self.ui_msg('Successfully collected sos report')
             self.file_list.append(self.sos_path.split('/')[-1])
             return True
-        else:
-            self.ui_msg('Failed to retrieve sos report')
-            return False
+        self.ui_msg('Failed to retrieve sos report')
+        return False

     def remove_sos_archive(self):
         """Remove the sos report archive from the node, since we have
@@ -919,9 +914,8 @@ def make_archive_readable(self, filepath):
         res = self.run_command(cmd, timeout=10, need_root=True)
         if res['status'] == 0:
             return True
-        else:
-            msg = "Exception while making %s readable. Return code was %s"
-            self.log_error(msg % (filepath, res['status']))
-            raise Exception
+        msg = "Exception while making %s readable. Return code was %s"
+        self.log_error(msg % (filepath, res['status']))
+        raise Exception

 # vim: set et ts=4 sw=4 :
diff --git a/sos/collector/transports/__init__.py b/sos/collector/transports/__init__.py
index b501b4a535..5780719333 100644
--- a/sos/collector/transports/__init__.py
+++ b/sos/collector/transports/__init__.py
@@ -304,7 +304,7 @@ def _run_command_with_pexpect(self, cmd, timeout, need_root, env):
             out = result.before
             result.close()
             return {'status': result.exitstatus, 'output': out}
-        elif index == 1:
+        if index == 1:
             raise CommandTimeoutException(cmd)
         # if we somehow manage to flow to this point, use this bogus exit code
         # as a signal to debugging efforts that whatever went sideways did so
@@ -331,7 +331,7 @@ def _send_pexpect_password(self, index, result):
             result.sendline(self.opts.sudo_pw)
         elif index == 3:
             if not self.opts.root_password:
-                msg = ("Unable to run command as root: no root password given")
+                msg = "Unable to run command as root: no root password given"
                 self.log_error(msg)
                 raise Exception(msg)
             result.sendline(self.opts.root_password)
@@ -399,12 +399,11 @@ def _read_file(self, fname):
         res = self.run_command(f"cat {fname}", timeout=10)
         if res['status'] == 0:
             return res['output']
+        if 'No such file' in res['output']:
+            self.log_debug(f"File {fname} does not exist on node")
         else:
-            if 'No such file' in res['output']:
-                self.log_debug(f"File {fname} does not exist on node")
-            else:
-                self.log_error(f"Error reading {fname}: "
-                               f"{res['output'].split(':')[1:]}")
-            return ''
+            self.log_error(f"Error reading {fname}: "
+                           f"{res['output'].split(':')[1:]}")
+        return ''

 # vim: set et ts=4 sw=4 :
diff --git a/sos/collector/transports/control_persist.py b/sos/collector/transports/control_persist.py
index 393a172ebb..9a814831e1 100644
--- a/sos/collector/transports/control_persist.py
+++ b/sos/collector/transports/control_persist.py
@@ -61,12 +61,12 @@ def _check_for_control_persist(self):
         True if ControlPersist is supported, else raise Exception.
""" ssh_cmd = ['ssh', '-o', 'ControlPersist'] - cmd = subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE) - out, err = cmd.communicate() - err = err.decode('utf-8') - if 'Bad configuration option' in err or 'Usage:' in err: - raise ControlPersistUnsupportedException + with subprocess.Popen(ssh_cmd, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) as cmd: + _, err = cmd.communicate() + err = err.decode('utf-8') + if 'Bad configuration option' in err or 'Usage:' in err: + raise ControlPersistUnsupportedException return True def _connect(self, password=''): # pylint: disable=too-many-branches @@ -95,7 +95,7 @@ def _connect(self, password=''): # pylint: disable=too-many-branches "Please update your OpenSSH installation.") raise self.log_info('Opening SSH session to create control socket') - self.control_path = (f"{self.tmpdir}/.sos-collector-{self.address}") + self.control_path = f"{self.tmpdir}/.sos-collector-{self.address}" self.ssh_cmd = '' connected = False ssh_key = '' @@ -112,11 +112,11 @@ def _connect(self, password=''): # pylint: disable=too-many-branches res = pexpect.spawn(cmd, encoding='utf-8') connect_expects = [ - u'Connected', - u'password:', - u'.*Permission denied.*', - u'.* port .*: No route to host', - u'.*Could not resolve hostname.*', + 'Connected', + 'password:', + '.*Permission denied.*', + '.* port .*: No route to host', + '.*Could not resolve hostname.*', pexpect.TIMEOUT ] @@ -127,8 +127,8 @@ def _connect(self, password=''): # pylint: disable=too-many-branches elif index == 1: if password: pass_expects = [ - u'Connected', - u'Permission denied, please try again.', + 'Connected', + 'Permission denied, please try again.', pexpect.TIMEOUT ] res.sendline(password) diff --git a/sos/collector/transports/juju.py b/sos/collector/transports/juju.py index bea95fb6f9..f0572ec3a7 100644 --- a/sos/collector/transports/juju.py +++ b/sos/collector/transports/juju.py @@ -40,9 +40,9 @@ def _check_juju_installed(self): cmd = "juju version" try: subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) - except subprocess.CalledProcessError: + except subprocess.CalledProcessError as err: self.log_error("Failed to check `juju` version") - raise JujuNotInstalledException + raise JujuNotInstalledException from err return True def _chmod(self, fname): diff --git a/sos/collector/transports/local.py b/sos/collector/transports/local.py index 52cd0d1570..f40a210588 100644 --- a/sos/collector/transports/local.py +++ b/sos/collector/transports/local.py @@ -43,7 +43,7 @@ def _format_cmd_for_exec(self, cmd): def _read_file(self, fname): if os.path.exists(fname): - with open(fname, 'r') as rfile: + with open(fname, 'r', encoding='utf-8') as rfile: return rfile.read() self.log_debug(f"No such file: {fname}") return '' diff --git a/sos/collector/transports/oc.py b/sos/collector/transports/oc.py index 8a45db5a86..b38f81a9cb 100644 --- a/sos/collector/transports/oc.py +++ b/sos/collector/transports/oc.py @@ -167,7 +167,7 @@ def _connect(self, password): podconf = self.get_node_pod_config() self.pod_name = podconf['metadata']['name'] fd, self.pod_tmp_conf = tempfile.mkstemp(dir=self.tmpdir) - with open(fd, 'w') as cfile: + with open(fd, 'w', encoding='utf-8') as cfile: json.dump(podconf, cfile) self.log_debug(f"Starting sos collector container '{self.pod_name}'") # this specifically does not need to run with a project definition diff --git a/sos/collector/transports/saltstack.py b/sos/collector/transports/saltstack.py index 6833064c2e..5e817baac1 100644 --- 
a/sos/collector/transports/saltstack.py +++ b/sos/collector/transports/saltstack.py @@ -64,6 +64,7 @@ def connected(self): up = self.run_command("echo Connected", timeout=10) return up['status'] == 0 + # pylint: disable=unused-argument def _check_for_saltstack(self, password=None): """Checks to see if the local system supported SaltStack Master. @@ -82,8 +83,7 @@ def _check_for_saltstack(self, password=None): res = sos_get_command_output(cmd) if res['status'] == 0: return res['status'] == 0 - else: - raise SaltStackMasterUnsupportedException + raise SaltStackMasterUnsupportedException def _connect(self, password=None): """Connect to the remote host using SaltStack Master. diff --git a/sos/component.py b/sos/component.py index 68ff2b6a64..17bc22e3ed 100644 --- a/sos/component.py +++ b/sos/component.py @@ -131,7 +131,7 @@ def __init__(self, parser, parsed_args, cmdline_args): self.manifest.add_field('compression', '') self.manifest.add_field('tmpdir', self.tmpdir) self.manifest.add_field('tmpdir_fs_type', self.tmpfstype) - self.manifest.add_field('policy', self.policy.distro) + self.manifest.add_field('policy', self.policy.os_release_name) self.manifest.add_section('components') def load_local_policy(self): @@ -148,7 +148,7 @@ def execute(self): raise NotImplementedError def get_exit_handler(self): - def exit_handler(signum, frame): + def exit_handler(signum, frame): # pylint: disable=unused-argument self.exit_process = True self._exit() return exit_handler @@ -191,7 +191,7 @@ def get_tmpdir_default(self): def check_listing_options(self): opts = [o for o in self.opts.dict().keys() if o.startswith('list')] if opts: - return any([getattr(self.opts, opt) for opt in opts]) + return any(getattr(self.opts, opt) for opt in opts) return False @classmethod @@ -199,7 +199,7 @@ def add_parser_options(cls, parser): """This should be overridden by each subcommand to add its own unique options to the parser """ - pass + raise NotImplementedError def apply_options_from_cmdline(self, opts): """(Re-)apply options specified via the cmdline to an options instance @@ -353,8 +353,7 @@ def setup_archive(self, name=''): if self.opts.encrypt: self._get_encryption_method() enc_opts = { - 'encrypt': True if (self.opts.encrypt_pass or - self.opts.encrypt_key) else False, + 'encrypt': self.opts.encrypt_pass or self.opts.encrypt_key, 'key': self.opts.encrypt_key, 'password': self.opts.encrypt_pass } @@ -480,10 +479,7 @@ def __getitem__(self, item): return self._values[item] def __getattr__(self, attr): - try: - return self._values[attr] - except Exception: - raise AttributeError(attr) + return self._values[attr] def add_field(self, field_name, content): """Add a key, value entry to the current metadata instance diff --git a/sos/help/__init__.py b/sos/help/__init__.py index 2de2f38895..e5162d98e7 100644 --- a/sos/help/__init__.py +++ b/sos/help/__init__.py @@ -115,9 +115,9 @@ def get_obj_for_topic(self): 'collector.transports.': self._get_collect_transport, 'collector.clusters.': self._get_collect_cluster, } - for _sec in _help: + for _sec, value in _help.items(): if self.opts.topic.startswith(_sec): - cls = _help[_sec]() + cls = value() break return cls @@ -209,11 +209,8 @@ def display_self_help(self): 'policies': 'How sos operates on different distributions' } - for sect in sections: - avail_help.add_text( - f"\t{bold(sect):<36}{sections[sect]}", - newline=False - ) + for sect, value in sections.items(): + avail_help.add_text(f"\t{bold(sect):<36}{value}", newline=False) self_help.display() diff --git a/sos/options.py 
b/sos/options.py index c8899158da..527f67981b 100644 --- a/sos/options.py +++ b/sos/options.py @@ -7,8 +7,7 @@ # See the LICENSE file in the source distribution for further information. from argparse import Action -from configparser import (ConfigParser, ParsingError, Error, - DuplicateOptionError) +from configparser import ConfigParser def _is_seq(val): @@ -22,10 +21,9 @@ def str_to_bool(val): _val = val.lower() if _val in ['true', 'on', 'yes']: return True - elif _val in ['false', 'off', 'no']: + if _val in ['false', 'off', 'no']: return False - else: - return None + return None class SoSOptions(): @@ -112,9 +110,9 @@ def __init__(self, arg_defaults={}, **kwargs): for arg in self.arg_defaults: setattr(self, arg, self.arg_defaults[arg]) # next, load any kwargs - for arg in kwargs.keys(): + for arg, kwarg in kwargs.items(): self.arg_names.append(arg) - setattr(self, arg, kwargs[arg]) + setattr(self, arg, kwarg) @classmethod def from_args(cls, args, arg_defaults={}): @@ -161,20 +159,16 @@ def _convert_to_type(self, key, val, conf): if isinstance(self.arg_defaults[key], type(val)): return val if isinstance(self.arg_defaults[key], list): - return [v for v in val.split(',')] + return list(val.split(',')) if isinstance(self.arg_defaults[key], bool): val = str_to_bool(val) if val is None: raise Exception( f"Value of '{key}' in {conf} must be True or False or " "analagous") - else: - return val + return val if isinstance(self.arg_defaults[key], int): - try: - return int(val) - except ValueError: - raise Exception(f"Value of '{key}' in {conf} must be integer") + return int(val) return val def update_from_conf(self, config_file, component): @@ -222,14 +216,8 @@ def _update_from_section(section, config): config = ConfigParser() try: - try: - with open(config_file) as f: - config.read_file(f, config_file) - except DuplicateOptionError as err: - raise exit(f"Duplicate option '{err.option}' in section " - f"'{err.section}' in file {config_file}") - except (ParsingError, Error): - raise exit(f'Failed to parse configuration file {config_file}') + with open(config_file, encoding='utf-8') as f: + config.read_file(f, config_file) except (OSError, IOError) as e: print( f'WARNING: Unable to read configuration file {config_file} : ' @@ -337,7 +325,7 @@ class SosListOption(Action): """Allow to specify comma delimited list of plugins""" def __call__(self, parser, namespace, values, option_string=None): - items = [opt for opt in values.split(',')] + items = list(values.split(',')) if getattr(namespace, self.dest): items += getattr(namespace, self.dest) setattr(namespace, self.dest, items) diff --git a/sos/policies/__init__.py b/sos/policies/__init__.py index 23a66d5bde..4394f29e7f 100644 --- a/sos/policies/__init__.py +++ b/sos/policies/__init__.py @@ -42,6 +42,7 @@ def load(cache={}, sysroot=None, init=None, probe_runtime=True, cache['policy'] = policy(sysroot=sysroot, init=init, probe_runtime=probe_runtime, remote_exec=remote_exec) + break if sys.platform != 'linux': raise Exception("SoS is not supported on this platform") @@ -75,8 +76,16 @@ class Policy(): SoSTransport in use :type remote_exec: ``SoSTranport.run_command()`` - :cvar distro: The name of the distribution the Policy represents - :vartype distro: ``str`` + :cvar os_release_name: The name of the distribution as it appears in the + os-release (-esque) file for the NAME variable. + :vartype os_release_name: ``str`` + + :cvar os_release_id: The ID variable to match in a distribution's release + file. 
+ :vartype os_release_id: ``str`` + + :cvar os_release_file: The filepath of the distribution's os-release file + :vartype os_release_file: ``str`` :cvar vendor: The name of the vendor producing the distribution :vartype vendor: ``str`` @@ -97,7 +106,7 @@ class Policy(): msg = _("""\ This command will collect system configuration and diagnostic information \ -from this %(distro)s system. +from this %(os_release_name)s system. For more information on %(vendor)s visit: @@ -111,8 +120,9 @@ class Policy(): %(vendor_text)s """) - - distro = "Unknown" + os_release_name = 'Unknown' + os_release_file = '' + os_release_id = '' vendor = "Unknown" vendor_urls = [('Example URL', "http://www.example.com/")] vendor_text = "" @@ -141,7 +151,8 @@ def __init__(self, sysroot=None, probe_runtime=True, remote_exec=None): self.sysroot = sysroot self.register_presets(GENERIC_PRESETS) - def check(self, remote=''): + @classmethod + def check(cls, remote=''): """ This function is responsible for determining if the underlying system is supported by this policy. @@ -153,7 +164,7 @@ def check(self, remote=''): :returns: ``True`` if the Policy should be loaded, else ``False`` :rtype: ``bool`` """ - return False + raise NotImplementedError @property def forbidden_paths(self): @@ -196,7 +207,6 @@ def dist_version(self): """ Return the OS version """ - pass def get_preferred_archive(self): """ @@ -307,13 +317,11 @@ def pre_work(self): """ This function is called prior to collection. """ - pass def post_work(self): """ This function is called after the sos report has been generated. """ - pass def pkg_by_name(self, pkg): """Wrapper to retrieve a package from the Policy's package manager @@ -328,7 +336,7 @@ def pkg_by_name(self, pkg): def _parse_uname(self): (system, node, release, - version, machine, processor) = platform.uname() + version, machine, _) = platform.uname() self.system = system self.hostname = node self.release = release @@ -353,7 +361,7 @@ def is_root(self): :returns: ``True`` if user is superuser, else ``False`` :rtype: ``bool`` """ - return (os.getuid() == 0) + return os.getuid() == 0 def get_preferred_hash_name(self): """Returns the string name of the hashlib-supported checksum algorithm @@ -399,11 +407,8 @@ def display_help(cls, section): seealso.add_text( "For more information on distribution policies, see below\n" ) - for pol in pols: - seealso.add_text( - f"{' ':>8}{pol:<20}{pols[pol]:<30}", - newline=False - ) + for pol, value in pols.items(): + seealso.add_text(f"{' ':>8}{pol:<20}{value:<30}", newline=False) def display_results(self, archive, directory, checksum, archivestat=None, map_file=None): @@ -464,8 +469,8 @@ def display_results(self, archive, directory, checksum, archivestat=None, def get_msg(self): """This method is used to prepare the preamble text to display to - the user in non-batch mode. If your policy sets self.distro that - text will be substituted accordingly. You can also override this + the user in non-batch mode. If your policy sets self.os_release_name, + that text will be substituted accordingly. You can also override this method to do something more complicated. :returns: Formatted banner message string @@ -476,7 +481,8 @@ def get_msg(self): else: changes_text = "No changes will be made to system configuration." 
width = 72 - _msg = self.msg % {'distro': self.distro, 'vendor': self.vendor, + _msg = self.msg % {'os_release_name': self.os_release_name, + 'vendor': self.vendor, 'vendor_urls': self._fmt_vendor_urls(), 'vendor_text': self.vendor_text, 'tmpdir': self.commons['tmpdir'], @@ -493,7 +499,7 @@ def _fmt_vendor_urls(self): :returns: Formatted string of URLS :rtype: ``str`` """ - width = max([len(v[0]) for v in self.vendor_urls]) + width = max(len(v[0]) for v in self.vendor_urls) return "\n".join( f"\t{url[0]:<{width}} : {url[1]}" for url in self.vendor_urls ) @@ -523,9 +529,9 @@ def find_preset(self, preset): :returns: a matching PresetProfile. """ # FIXME: allow fuzzy matching? - for match in self.presets.keys(): + for match, value in self.presets.items(): if match == preset: - return self.presets[match] + return value return None @@ -552,7 +558,7 @@ def load_presets(self, presets_path=None): for preset_path in os.listdir(presets_path): preset_path = os.path.join(presets_path, preset_path) - with open(preset_path) as pf: + with open(preset_path, encoding='utf-8') as pf: try: preset_data = json.load(pf) except ValueError: @@ -581,7 +587,7 @@ def add_preset(self, name=None, desc=None, note=None, opts=SoSOptions()): if not name: raise ValueError("Preset name cannot be empty") - if name in self.presets.keys(): + if name in self.presets: raise ValueError(f"A preset with name '{name}' already exists") preset = PresetDefaults(name=name, desc=desc, note=note, opts=opts) @@ -590,7 +596,7 @@ def add_preset(self, name=None, desc=None, note=None, opts=SoSOptions()): preset.write(presets_path) def del_preset(self, name=""): - if not name or name not in self.presets.keys(): + if not name or name not in self.presets: raise ValueError(f"Unknown profile: '{name}'") preset = self.presets[name] diff --git a/sos/policies/auth/__init__.py b/sos/policies/auth/__init__.py index 9530f6470c..483e3ba5b3 100644 --- a/sos/policies/auth/__init__.py +++ b/sos/policies/auth/__init__.py @@ -17,6 +17,8 @@ import time from datetime import datetime, timedelta +from sos.utilities import TIMEOUT_DEFAULT + DEVICE_AUTH_CLIENT_ID = "sos-tools" GRANT_TYPE_DEVICE_CODE = "urn:ietf:params:oauth:grant-type:device_code" @@ -67,7 +69,8 @@ def _request_device_code(self): res = requests.post( self.client_identifier_url, data=data, - headers=headers) + headers=headers, + timeout=TIMEOUT_DEFAULT) res.raise_for_status() response = res.json() self._user_code = response.get("user_code") @@ -99,7 +102,8 @@ def poll_for_auth_completion(self): time.sleep(self._interval) try: check_auth_completion = requests.post(self.token_endpoint, - data=token_data) + data=token_data, + timeout=TIMEOUT_DEFAULT) status_code = check_auth_completion.status_code @@ -142,13 +146,11 @@ def get_access_token(self): """ if self.is_access_token_valid(): return self._access_token - else: - if self.is_refresh_token_valid(): - self._use_refresh_token_grant() - return self._access_token - else: - self._use_device_code_grant() - return self._access_token + if self.is_refresh_token_valid(): + self._use_refresh_token_grant() + return self._access_token + self._use_device_code_grant() + return self._access_token def is_access_token_valid(self): """ @@ -189,7 +191,8 @@ def _use_refresh_token_grant(self, refresh_token=None): refresh_token else refresh_token} refresh_token_res = requests.post(self.token_endpoint, - data=refresh_token_data) + data=refresh_token_data, + timeout=TIMEOUT_DEFAULT) if refresh_token_res.status_code == 200: self._set_token_data(refresh_token_res.json()) diff 
--git a/sos/policies/distros/__init__.py b/sos/policies/distros/__init__.py index 92253ac5e6..8c178020ec 100644 --- a/sos/policies/distros/__init__.py +++ b/sos/policies/distros/__init__.py @@ -25,7 +25,7 @@ from sos.policies.runtimes.lxd import LxdContainerRuntime from sos.utilities import (shell_out, is_executable, bold, - sos_get_command_output) + sos_get_command_output, TIMEOUT_DEFAULT) try: @@ -40,6 +40,7 @@ except ImportError: BOTO3_LOADED = False +OS_RELEASE = "/etc/os-release" # Container environment variables for detecting if we're in a container ENV_CONTAINER = 'container' ENV_HOST_SYSROOT = 'HOST' @@ -48,11 +49,14 @@ class LinuxPolicy(Policy): """This policy is meant to be an abc class that provides common implementations used in Linux distros""" - - distro = "Linux" vendor = "None" PATH = "/bin:/sbin:/usr/bin:/usr/sbin" init = None + # the following will be used, in order, as part of check() to validate that + # we are running on a particular distro + os_release_file = '' + os_release_name = '' + os_release_id = '' # _ prefixed class attrs are used for storing any vendor-defined defaults # the non-prefixed attrs are used by the upload methods, and will be set # to the cmdline/config file values, if provided. If not provided, then @@ -126,7 +130,7 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, self.runtimes['default'] = self.runtimes[runtime.name] self.runtimes[runtime.name].load_container_info() - if self.runtimes and 'default' not in self.runtimes.keys(): + if self.runtimes and 'default' not in self.runtimes: # still allow plugins to query a runtime present on the system # even if that is not the policy default one idx = list(self.runtimes.keys()) @@ -139,6 +143,32 @@ def set_forbidden_paths(cls): '/etc/shadow' ] + @classmethod + def check(cls, remote=''): + """ + This function is responsible for determining if the underlying system + is supported by this policy. 
+ """ + def _check_release(content): + _matches = [cls.os_release_name] + if cls.os_release_id: + _matches.append(cls.os_release_id) + for line in content.splitlines(): + if line.startswith(('NAME=', 'ID=')): + _distro = line.split('=')[1:][0].strip("\"'") + if _distro in _matches: + return True + return False + + if remote: + return _check_release(remote) + # use the os-specific file primarily + if os.path.isfile(cls.os_release_file): + return True + # next check os-release for a NAME or ID value we expect + with open(OS_RELEASE, "r", encoding='utf-8') as f: + return _check_release(f.read()) + def kernel_version(self): return self.release @@ -163,7 +193,7 @@ def display_help(cls, section): if cls == LinuxPolicy: cls.display_self_help(section) else: - section.set_title(f"{cls.distro} Distribution Policy") + section.set_title(f"{cls.os_release_name} Distribution Policy") cls.display_distro_help(section) @classmethod @@ -211,11 +241,10 @@ def display_distro_help(cls, section): ), newline=False ) - for preset in _pol.presets: - _preset = _pol.presets[preset] - _opts = ' '.join(_preset.opts.to_args()) + for preset, value in _pol.presets.items(): + _opts = ' '.join(value.opts.to_args()) presec.add_text( - f"{' ':>8}{preset:<20}{_preset.desc:<45}{_opts:<30}", + f"{' ':>8}{preset:<20}{value.desc:<45}{_opts:<30}", newline=False ) @@ -254,7 +283,7 @@ def init_kernel_modules(self): f"/usr/lib/modules/{release}/modules.builtin" ) try: - with open(builtins, "r") as mfile: + with open(builtins, "r", encoding='utf-8') as mfile: for line in mfile: kmod = line.split('/')[-1].split('.ko')[0] self.kernel_mods.append(kmod) @@ -269,18 +298,30 @@ def init_kernel_modules(self): 'dm_mod': 'CONFIG_BLK_DEV_DM' } - booted_config = self.join_sysroot(f"/boot/config-{release}") + kconfigs = ( + f"/boot/config-{release}", + f"/lib/modules/{release}/config", + ) + for kconfig in kconfigs: + kconfig = self.join_sysroot(kconfig) + if os.path.exists(kconfig): + booted_config = kconfig + break + else: + self.soslog.warning("Unable to find booted kernel config") + return + kconfigs = [] try: - with open(booted_config, "r") as kfile: + with open(booted_config, "r", encoding='utf-8') as kfile: for line in kfile: if '=y' in line: kconfigs.append(line.split('=y')[0]) except IOError as err: self.soslog.warning(f"Unable to read booted kernel config: {err}") - for builtin in config_strings: - if config_strings[builtin] in kconfigs: + for builtin, value in config_strings.items(): + if value in kconfigs: self.kernel_mods.append(builtin) def join_sysroot(self, path): @@ -316,16 +357,13 @@ def pre_work(self): # set or query for case id if not cmdline_opts.batch and not \ cmdline_opts.quiet: - try: - if caseid: - self.commons['cmdlineopts'].case_id = caseid - else: - self.commons['cmdlineopts'].case_id = input( - _("Optionally, please enter the case id that you are " - "generating this report for [%s]: ") % caseid - ) - except KeyboardInterrupt: - raise + if caseid: + self.commons['cmdlineopts'].case_id = caseid + else: + self.commons['cmdlineopts'].case_id = input( + _("Optionally, please enter the case id that you are " + "generating this report for [%s]: ") % caseid + ) if cmdline_opts.case_id: self.case_id = cmdline_opts.case_id @@ -333,22 +371,17 @@ def pre_work(self): # setting case id, as below methods might rely on detection of it if not cmdline_opts.batch and not \ cmdline_opts.quiet: - try: - # Policies will need to handle the prompts for user information - if cmdline_opts.upload and self.get_upload_url() and \ - not 
cmdline_opts.upload_protocol == 's3': - self.prompt_for_upload_user() - self.prompt_for_upload_password() - elif cmdline_opts.upload_protocol == 's3': - self.prompt_for_upload_s3_bucket() - self.prompt_for_upload_s3_endpoint() - self.prompt_for_upload_s3_access_key() - self.prompt_for_upload_s3_secret_key() - self.ui_log.info('') - except KeyboardInterrupt: - raise - - return + # Policies will need to handle the prompts for user information + if cmdline_opts.upload and self.get_upload_url() and \ + not cmdline_opts.upload_protocol == 's3': + self.prompt_for_upload_user() + self.prompt_for_upload_password() + elif cmdline_opts.upload_protocol == 's3': + self.prompt_for_upload_s3_bucket() + self.prompt_for_upload_s3_endpoint() + self.prompt_for_upload_s3_access_key() + self.prompt_for_upload_s3_secret_key() + self.ui_log.info('') def _configure_low_priority(self): """Used to constrain sos to a 'low priority' execution, potentially @@ -520,12 +553,12 @@ def _determine_upload_type(self): 'https': self.upload_https, 's3': self.upload_s3 } - if self.commons['cmdlineopts'].upload_protocol in prots.keys(): + if self.commons['cmdlineopts'].upload_protocol in prots: return prots[self.commons['cmdlineopts'].upload_protocol] - elif '://' not in self.upload_url: + if '://' not in self.upload_url: raise Exception("Must provide protocol in upload URL") - prot, url = self.upload_url.split('://') - if prot not in prots.keys(): + prot, _ = self.upload_url.split('://') + if prot not in prots: raise Exception(f"Unsupported or unrecognized protocol: {prot}") return prots[prot] @@ -632,11 +665,16 @@ def get_upload_url(self): self._upload_url = f"s3://{bucket}/{prefix}" return self.upload_url or self._upload_url + def _get_obfuscated_upload_url(self, url): + pattern = r"([^:]+://[^:]+:)([^@]+)(@.+)" + obfuscated_url = re.sub(pattern, r'\1********\3', url) + return obfuscated_url + def get_upload_url_string(self): """Used by distro policies to potentially change the string used to report upload location from the URL to a more human-friendly string """ - return self.get_upload_url() + return self._get_obfuscated_upload_url(self.get_upload_url()) def get_upload_user(self): """Helper function to determine if we should use the policy default @@ -682,9 +720,9 @@ def upload_sftp(self, user=None, password=None): # via ssh python bindings commonly available among downstreams try: import pexpect - except ImportError: + except ImportError as err: raise Exception('SFTP upload requires python3-pexpect, which is ' - 'not currently installed') + 'not currently installed') from err sftp_connected = False @@ -699,9 +737,9 @@ def upload_sftp(self, user=None, password=None): ret = pexpect.spawn(sftp_cmd, encoding='utf-8') sftp_expects = [ - u'sftp>', - u'password:', - u'Connection refused', + 'sftp>', + 'password:', + 'Connection refused', pexpect.TIMEOUT, pexpect.EOF ] @@ -713,8 +751,8 @@ def upload_sftp(self, user=None, password=None): elif idx == 1: ret.sendline(password) pass_expects = [ - u'sftp>', - u'Permission denied', + 'sftp>', + 'Permission denied', pexpect.TIMEOUT, pexpect.EOF ] @@ -743,10 +781,10 @@ def upload_sftp(self, user=None, password=None): ret.sendline(put_cmd) put_expects = [ - u'100%', + '100%', pexpect.TIMEOUT, pexpect.EOF, - u'No such file or directory' + 'No such file or directory' ] put_success = ret.expect(put_expects, timeout=180) @@ -754,14 +792,13 @@ def upload_sftp(self, user=None, password=None): if put_success == 0: ret.sendline('bye') return True - elif put_success == 1: + if put_success == 1: 
raise Exception("Timeout expired while uploading") - elif put_success == 2: + if put_success == 2: raise Exception(f"Unknown error during upload: {ret.before}") - elif put_success == 3: + if put_success == 3: raise Exception("Unable to write archive to destination") - else: - raise Exception(f"Unexpected response from server: {ret.before}") + raise Exception(f"Unexpected response from server: {ret.before}") def _get_sftp_upload_name(self): """If a specific file name pattern is required by the SFTP server, @@ -785,7 +822,7 @@ def _upload_https_put(self, archive, verify=True): """ return requests.put(self.get_upload_url(), data=archive, auth=self.get_upload_https_auth(), - verify=verify) + verify=verify, timeout=TIMEOUT_DEFAULT) def _get_upload_headers(self): """Define any needed headers to be passed with the POST request here @@ -805,7 +842,7 @@ def _upload_https_post(self, archive, verify=True): } return requests.post(self.get_upload_url(), files=files, auth=self.get_upload_https_auth(), - verify=verify) + verify=verify, timeout=TIMEOUT_DEFAULT) def upload_https(self): """Attempts to upload the archive to an HTTPS location. @@ -829,7 +866,7 @@ def upload_https(self): r = self._upload_https_put(arc, verify) else: r = self._upload_https_post(arc, verify) - if r.status_code != 200 and r.status_code != 201: + if r.status_code not in (200, 201): if r.status_code == 401: raise Exception( "Authentication failed: invalid user credentials" @@ -859,13 +896,8 @@ def upload_ftp(self, url=None, directory=None, user=None, password=None): :raises: ``Exception`` if upload in unsuccessful """ - try: - import ftplib - import socket - except ImportError: - # socket is part of the standard library, should only fail here on - # ftplib - raise Exception("missing python ftplib library") + import ftplib + import socket if not url: url = self.get_upload_url() @@ -890,31 +922,28 @@ def upload_ftp(self, url=None, directory=None, user=None, password=None): raise Exception("connection failed, did you set a user and " "password?") session.cwd(directory) - except socket.timeout: - raise Exception(f"timeout hit while connecting to {url}") - except socket.gaierror: - raise Exception(f"unable to connect to {url}") + except socket.timeout as err: + raise Exception(f"timeout hit while connecting to {url}") from err + except socket.gaierror as err: + raise Exception(f"unable to connect to {url}") from err except ftplib.error_perm as err: errno = str(err).split()[0] if errno == '503': - raise Exception(f"could not login as '{user}'") + raise Exception(f"could not login as '{user}'") from err if errno == '530': - raise Exception(f"invalid password for user '{user}'") + raise Exception(f"invalid password for user '{user}'") from err if errno == '550': raise Exception("could not set upload directory to " - f"{directory}") - raise Exception(f"error trying to establish session: {str(err)}") + f"{directory}") from err + raise Exception(f"error trying to establish session: {str(err)}") \ + from err - try: - with open(self.upload_archive_name, 'rb') as _arcfile: - session.storbinary( - f"STOR {self.upload_archive_name.split('/')[-1]}", - _arcfile + with open(self.upload_archive_name, 'rb') as _arcfile: + session.storbinary( + f"STOR {self.upload_archive_name.split('/')[-1]}", _arcfile ) - session.quit() - return True - except IOError: - raise Exception("could not open archive file") + session.quit() + return True def upload_s3(self, endpoint=None, region=None, bucket=None, prefix=None, access_key=None, secret_key=None): @@ -996,6 
+1025,7 @@ def set_cleanup_cmd(self): """ return '' + # pylint: disable=unused-argument def create_sos_container(self, image=None, auth=None, force_pull=False): """Returns the command that will create the container that will be used for running commands inside a container on hosts that require it. @@ -1045,8 +1075,7 @@ def format_container_command(self, cmd): if self.container_runtime: return (f'{self.container_runtime} exec {self.sos_container_name} ' f'{cmd}') - else: - return cmd + return cmd class GenericLinuxPolicy(LinuxPolicy): @@ -1061,5 +1090,12 @@ class GenericLinuxPolicy(LinuxPolicy): 'users are encouraged to request a new distribution-specifc' ' policy at the GitHub project above.\n') + @classmethod + def check(cls, remote=''): + """ + This function is responsible for determining if the underlying system + is supported by this policy. + """ + raise NotImplementedError # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/almalinux.py b/sos/policies/distros/almalinux.py index f86f1c9b6d..563bab99d2 100644 --- a/sos/policies/distros/almalinux.py +++ b/sos/policies/distros/almalinux.py @@ -8,13 +8,14 @@ # # See the LICENSE file in the source distribution for further information. -import os -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class AlmaLinuxPolicy(RedHatPolicy): - distro = "AlmaLinux" vendor = "AlmaLinux OS Foundation" + os_release_file = '/etc/almalinux-release' + os_release_name = 'AlmaLinux' + vendor_urls = [ ('Distribution Website', 'https://www.almalinux.org/'), ('Commercial Support', 'https://tuxcare.com/linux-support-services/') @@ -26,21 +27,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote=''): - if remote: - return cls.distro in remote - - if not os.path.isfile('/etc/almalinux-release'): - return False - - if os.path.exists(OS_RELEASE): - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'AlmaLinux' in line: - return True - - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/amazon.py b/sos/policies/distros/amazon.py index a888079ae9..86c33cd2ab 100644 --- a/sos/policies/distros/amazon.py +++ b/sos/policies/distros/amazon.py @@ -8,15 +8,14 @@ # # See the LICENSE file in the source distribution for further information. 
-import os -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class AmazonPolicy(RedHatPolicy): - - distro = "Amazon Linux" vendor = "Amazon" vendor_urls = [('Distribution Website', 'https://aws.amazon.com')] + os_release_file = '' + os_release_name = 'Amazon Linux' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -24,20 +23,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote=''): - - if remote: - return cls.distro in remote - - if not os.path.exists(OS_RELEASE): - return False - - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'Amazon Linux' in line: - return True - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/anolis.py b/sos/policies/distros/anolis.py index 79973be739..5f64f8ff57 100644 --- a/sos/policies/distros/anolis.py +++ b/sos/policies/distros/anolis.py @@ -6,15 +6,14 @@ # # See the LICENSE file in the source distribution for further information. -import os -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class AnolisPolicy(RedHatPolicy): - - distro = "Anolis OS" vendor = "The OpenAnolis Project" vendor_urls = [('Distribution Website', 'https://openanolis.org/')] + os_release_file = '/etc/anolis-release' + os_release_name = 'Anolis OS' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -22,25 +21,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote=''): - - if remote: - return cls.distro in remote - - # Return False if /etc/os-release is missing - if not os.path.exists(OS_RELEASE): - return False - - # Return False if /etc/anolis-release is missing - if not os.path.isfile('/etc/anolis-release'): - return False - - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'Anolis OS' in line: - return True - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/azure.py b/sos/policies/distros/azure.py index 6f0e45ad56..9d9b4bffde 100644 --- a/sos/policies/distros/azure.py +++ b/sos/policies/distros/azure.py @@ -8,18 +8,17 @@ # # See the LICENSE file in the source distribution for further information. 
-import os from sos.report.plugins import AzurePlugin -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class AzurePolicy(RedHatPolicy): - - distro = "Azure Linux" vendor = "Microsoft" vendor_urls = [ ('Distribution Website', 'https://github.com/microsoft/azurelinux') ] + os_release_name = 'Microsoft Azure Linux' + os_release_file = '' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -28,22 +27,8 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=remote_exec) self.valid_subclasses += [AzurePlugin] - @classmethod - def check(cls, remote=''): - - if remote: - return cls.distro in remote - - if not os.path.exists(OS_RELEASE): - return False - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'Common Base Linux Mariner' in line: - return True - if 'Microsoft Azure Linux' in line: - return True - return False +class CBLMarinerPolicy(AzurePolicy): + os_release_name = 'Common Base Linux Mariner' # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/circle.py b/sos/policies/distros/circle.py index 8532963bce..cc28ec2cca 100644 --- a/sos/policies/distros/circle.py +++ b/sos/policies/distros/circle.py @@ -8,15 +8,14 @@ # # See the LICENSE file in the source distribution for further information. -import os -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class CirclePolicy(RedHatPolicy): - - distro = "Circle Linux" vendor = "The Circle Linux Project" vendor_urls = [('Distribution Website', 'https://cclinux.org')] + os_release_file = '/etc/circle-release' + os_release_name = 'Circle Linux' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -24,26 +23,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote=''): - - if remote: - return cls.distro in remote - - # Return False if /etc/os-release is missing - if not os.path.exists(OS_RELEASE): - return False - - # Return False if /etc/circle-release is missing - if not os.path.isfile('/etc/circle-release'): - return False - - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'Circle Linux' in line: - return True - - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/cloudlinux.py b/sos/policies/distros/cloudlinux.py new file mode 100644 index 0000000000..90b240a80b --- /dev/null +++ b/sos/policies/distros/cloudlinux.py @@ -0,0 +1,29 @@ +# Copyright (C) Eduard Abdullin + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. 
+ +from sos.policies.distros.redhat import RedHatPolicy + + +class CloudLinuxPolicy(RedHatPolicy): + vendor = "CloudLinux" + vendor_urls = [ + ('Distribution Website', 'https://www.cloudlinux.com/'), + ('Commercial Support', 'https://www.cloudlinux.com/') + ] + os_release_file = '/etc/cloudlinux-release' + os_release_name = 'CloudLinux' + + def __init__(self, sysroot=None, init=None, probe_runtime=True, + remote_exec=None): + super().__init__(sysroot=sysroot, init=init, + probe_runtime=probe_runtime, + remote_exec=remote_exec) + +# vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/cos.py b/sos/policies/distros/cos.py index bf28d03ce8..b88cf519c8 100644 --- a/sos/policies/distros/cos.py +++ b/sos/policies/distros/cos.py @@ -27,12 +27,13 @@ def _blank_or_comment(line): class CosPolicy(LinuxPolicy): - distro = "Container-Optimized OS" vendor = "Google Cloud Platform" vendor_urls = [ ('Distribution Website', 'https://cloud.google.com/container-optimized-os/') ] + os_release_name = 'Container-Optimized OS' + os_release_id = 'cos' valid_subclasses = [CosPlugin, IndependentPlugin] PATH = "/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin" @@ -43,17 +44,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=remote_exec) self.valid_subclasses += [CosPolicy] - @classmethod - def check(cls, remote=''): - if remote: - return cls.distro in remote - - try: - with open('/etc/os-release', 'r') as fp: - os_release = dict(line.strip().split('=') for line in fp - if not _blank_or_comment(line)) - return os_release['ID'] == 'cos' - except (IOError, KeyError): - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/debian.py b/sos/policies/distros/debian.py index 26743792b5..7db96f9d4c 100644 --- a/sos/policies/distros/debian.py +++ b/sos/policies/distros/debian.py @@ -6,17 +6,16 @@ # # See the LICENSE file in the source distribution for further information. -import os - from sos.report.plugins import DebianPlugin from sos.policies.distros import LinuxPolicy from sos.policies.package_managers.dpkg import DpkgPackageManager class DebianPolicy(LinuxPolicy): - distro = "Debian" vendor = "the Debian project" vendor_urls = [('Community Website', 'https://www.debian.org/')] + os_release_name = 'Debian' + os_release_file = '/etc/debian_version' name_pattern = 'friendly' valid_subclasses = [DebianPlugin] PATH = "/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" \ @@ -50,19 +49,9 @@ def _get_pkg_name_for_binary(self, binary): "xz": "xz-utils" }.get(binary, binary) - @classmethod - def check(cls, remote=''): - """This method checks to see if we are running on Debian. - It returns True or False.""" - - if remote: - return cls.distro in remote - - return os.path.isfile('/etc/debian_version') - def dist_version(self): try: - with open('/etc/os-release', 'r') as fp: + with open('/etc/os-release', 'r', encoding='utf-8') as fp: rel_string = "" lines = fp.readlines() for line in lines: diff --git a/sos/policies/distros/opencloudos.py b/sos/policies/distros/opencloudos.py index ff3c8af5d9..f471fa4b5d 100644 --- a/sos/policies/distros/opencloudos.py +++ b/sos/policies/distros/opencloudos.py @@ -7,14 +7,14 @@ # # See the LICENSE file in the source distribution for further information. 
-import os -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class OpenCloudOSPolicy(RedHatPolicy): - distro = "OpenCloudOS Stream" vendor = "OpenCloudOS" vendor_urls = [('Distribution Website', 'https://www.opencloudos.org/')] + os_release_name = 'OpenCloudOS Stream' + os_release_file = '' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -22,21 +22,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote=''): - - if remote: - return cls.distro in remote - - if not os.path.exists(OS_RELEASE): - return False - - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'OpenCloudOS Stream' in line: - return True - - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/openeuler.py b/sos/policies/distros/openeuler.py index 2c361ec171..8610ae1ed9 100644 --- a/sos/policies/distros/openeuler.py +++ b/sos/policies/distros/openeuler.py @@ -6,15 +6,15 @@ # # See the LICENSE file in the source distribution for further information. -import os from sos.report.plugins import OpenEulerPlugin -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class OpenEulerPolicy(RedHatPolicy): - distro = "openEuler" vendor = "The openEuler Project" vendor_urls = [('Distribution Website', 'https://openeuler.org/')] + os_release_name = 'openEuler' + os_release_file = '' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -24,20 +24,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, self.valid_subclasses += [OpenEulerPlugin] - @classmethod - def check(cls, remote=''): - - if remote: - return cls.distro in remote - - if not os.path.exists(OS_RELEASE): - return False - - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'openEuler' in line: - return True - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/redhat.py b/sos/policies/distros/redhat.py index e29752ec59..abe5cc1d90 100644 --- a/sos/policies/distros/redhat.py +++ b/sos/policies/distros/redhat.py @@ -18,11 +18,11 @@ from sos.presets.redhat import (RHEL_PRESETS, RHV, RHEL, CB, RHOSP, RHOCP, RH_CFME, RH_SATELLITE, AAPEDA, AAPCONTROLLER) -from sos.policies.distros import LinuxPolicy, ENV_HOST_SYSROOT +from sos.policies.distros import LinuxPolicy, ENV_HOST_SYSROOT, OS_RELEASE from sos.policies.package_managers.rpm import RpmPackageManager from sos.policies.package_managers.flatpak import FlatpakPackageManager from sos.policies.package_managers import MultiPackageManager -from sos.utilities import bold, convert_bytes +from sos.utilities import bold, convert_bytes, TIMEOUT_DEFAULT from sos import _sos as _ try: @@ -31,12 +31,10 @@ except ImportError: REQUESTS_LOADED = False -OS_RELEASE = "/etc/os-release" RHEL_RELEASE_STR = "Red Hat Enterprise Linux" class RedHatPolicy(LinuxPolicy): - distro = "Red Hat" vendor = "Red Hat" vendor_urls = [ ('Distribution Website', 'https://www.redhat.com/'), @@ -89,18 +87,6 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, self.set_exec_path() self.load_presets() - @classmethod - def check(cls, remote=''): - """This method checks to see if we are running on Red Hat. 
It must be - overriden by concrete subclasses to return True when running on a - Fedora, RHEL or other Red Hat distribution or False otherwise. - - If `remote` is provided, it should be the contents of a remote host's - os-release, or comparable, file to be used in place of the locally - available one. - """ - return False - @classmethod def display_distro_help(cls, section): if cls is not RedHatPolicy: @@ -119,10 +105,10 @@ def display_distro_help(cls, section): 'fedora': FedoraPolicy } - for subc in subs: + for subc, value in subs.items(): subln = bold(f"policies.{subc}") section.add_text( - f"{' ':>8}{subln:<35}{subs[subc].distro:<30}", + f"{' ':>8}{subln:<35}{value.os_release_name:<30}", newline=False ) @@ -138,9 +124,8 @@ def check_usrmove(self, pkgs): """ if 'filesystem' not in pkgs: return os.path.islink('/bin') and os.path.islink('/sbin') - else: - filesys_version = pkgs['filesystem']['version'] - return True if filesys_version[0] == '3' else False + filesys_version = pkgs['filesystem']['version'] + return filesys_version[0] == '3' def mangle_package_path(self, files): """Mangle paths for post-UsrMove systems. @@ -165,8 +150,7 @@ def transform_path(path): for f in files: paths.extend(transform_path(f)) return paths - else: - return files + return files def get_tmp_dir(self, opt_tmp_dir): if not opt_tmp_dir: @@ -218,11 +202,13 @@ class RHELPolicy(RedHatPolicy): technical support engineer. This information will be printed at the end of the upload process for any sos report execution. """ - distro = RHEL_RELEASE_STR vendor = "Red Hat" + os_release_file = '/etc/redhat-release' + os_release_name = RHEL_RELEASE_STR + os_release_id = 'rhel' msg = _("""\ This command will collect diagnostic and configuration \ -information from this %(distro)s system and installed \ +information from this %(os_release_name)s system and installed \ applications. An archive containing the collected information will be \ @@ -242,34 +228,6 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=remote_exec) self.register_presets(RHEL_PRESETS) - @classmethod - def check(cls, remote=''): - """Test to see if the running host is a RHEL installation. - - Checks for the presence of the "Red Hat Enterprise Linux" - release string at the beginning of the NAME field in the - `/etc/os-release` file and returns ``True`` if it is - found, and ``False`` otherwise. - - :returns: ``True`` if the host is running RHEL or ``False`` - otherwise. 
- """ - - if remote: - return cls.distro in remote - - if not os.path.exists(OS_RELEASE): - return False - - with open(OS_RELEASE, "r") as f: - for line in f: - if line.startswith("NAME"): - (name, value) = line.split("=") - value = value.strip("\"'") - if value.startswith(cls.distro): - return True - return False - def prompt_for_upload_user(self): if self.commons['cmdlineopts'].upload_user: self.ui_log.info( @@ -288,21 +246,19 @@ def prompt_for_upload_password(self): _("The option --upload-pass has been deprecated in favour" " of device authorization in RHEL") ) - return def get_upload_url(self): if self.upload_url: return self.upload_url - elif self.commons['cmdlineopts'].upload_url: + if self.commons['cmdlineopts'].upload_url: return self.commons['cmdlineopts'].upload_url - elif self.commons['cmdlineopts'].upload_protocol == 'sftp': + if self.commons['cmdlineopts'].upload_protocol == 'sftp': return RH_SFTP_HOST - elif not self.commons['cmdlineopts'].case_id: + if not self.commons['cmdlineopts'].case_id: self.ui_log.info("No case id provided, uploading to SFTP") return RH_SFTP_HOST - else: - rh_case_api = "/support/v1/cases/%s/attachments" - return RH_API_HOST + rh_case_api % self.case_id + rh_case_api = "/support/v1/cases/%s/attachments" + return RH_API_HOST + rh_case_api % self.case_id def _get_upload_https_auth(self): str_auth = f"Bearer {self._device_token}" @@ -331,7 +287,7 @@ def _upload_https_post(self, archive, verify=True): f"{self.get_upload_url_string()}") return requests.post(self.get_upload_url(), files=files, headers=self._get_upload_https_auth(), - verify=verify) + verify=verify, timeout=TIMEOUT_DEFAULT) def _get_upload_headers(self): if self.get_upload_url().startswith(RH_API_HOST): @@ -341,9 +297,9 @@ def _get_upload_headers(self): def get_upload_url_string(self): if self.get_upload_url().startswith(RH_API_HOST): return "Red Hat Customer Portal" - elif self.get_upload_url().startswith(RH_SFTP_HOST): + if self.get_upload_url().startswith(RH_SFTP_HOST): return "Red Hat Secure FTP" - return self.upload_url + return self._get_obfuscated_upload_url(self.upload_url) def _get_sftp_upload_name(self): """The RH SFTP server will only automatically connect file uploads to @@ -356,7 +312,8 @@ def _get_sftp_upload_name(self): fname = os.path.join(self.upload_directory, fname) return fname - def upload_sftp(self): # pylint: disable=too-many-branches + # pylint: disable=too-many-branches + def upload_sftp(self, user=None, password=None): """Override the base upload_sftp to allow for setting an on-demand generated anonymous login for the RH SFTP server if a username and password are not given @@ -437,16 +394,14 @@ def check_file_too_big(self, archive): # There's really no need to transform the size to Gb, # so we don't need to call any size converter implemented # in tools.py - if (size >= self._max_size_request): + if size >= self._max_size_request: self.ui_log.warning( _("Size of archive is bigger than Red Hat Customer Portal " "limit for uploads of " f"{convert_bytes(self._max_size_request)} " " via sos http upload. 
\n") ) - return RH_SFTP_HOST - else: - return RH_API_HOST + self.upload_url = RH_SFTP_HOST def upload_archive(self, archive): """Override the base upload_archive to provide for automatic failover @@ -454,19 +409,18 @@ def upload_archive(self, archive): """ try: if self.get_upload_url().startswith(RH_API_HOST): - self.upload_url = self.check_file_too_big(archive) + self.check_file_too_big(archive) uploaded = super().upload_archive(archive) except Exception as e: uploaded = False if not self.upload_url.startswith(RH_API_HOST): raise - else: - self.ui_log.error( - _(f"Upload to Red Hat Customer Portal failed due to " - f"{e}. Trying {RH_SFTP_HOST}") + self.ui_log.error( + _(f"Upload to Red Hat Customer Portal failed due to " + f"{e}. Trying {RH_SFTP_HOST}") ) - self.upload_url = RH_SFTP_HOST - uploaded = super().upload_archive(archive) + self.upload_url = RH_SFTP_HOST + uploaded = super().upload_archive(archive) return uploaded def dist_version(self): @@ -508,9 +462,11 @@ def probe_preset(self): class CentOsPolicy(RHELPolicy): - distro = "CentOS" vendor = "CentOS" vendor_urls = [('Community Website', 'https://www.centos.org/')] + os_release_file = '/etc/centos-release' + os_release_name = 'CentOS Linux' + os_release_id = 'centos' class RedHatCoreOSPolicy(RHELPolicy): @@ -534,10 +490,10 @@ class RedHatCoreOSPolicy(RHELPolicy): impact how sos report collections are performed. """ - distro = "Red Hat CoreOS" + os_release_name = "Red Hat Enterprise Linux CoreOS" msg = _("""\ This command will collect diagnostic and configuration \ -information from this %(distro)s system. +information from this %(os_release_name)s system. An archive containing the collected information will be \ generated in %(tmpdir)s and may be provided to a %(vendor)s \ @@ -568,9 +524,9 @@ def check(cls, remote=''): return coreos host_release = os.environ[ENV_HOST_SYSROOT] + OS_RELEASE try: - with open(host_release, 'r') as hfile: + with open(host_release, 'r', encoding='utf-8') as hfile: for line in hfile.read().splitlines(): - coreos |= 'Red Hat Enterprise Linux CoreOS' in line + coreos |= cls.os_release_name in line except IOError: # host release file not present, will fallback to RHEL policy check pass @@ -614,13 +570,14 @@ class FedoraPolicy(RedHatPolicy): for that location via --upload-user and --upload-pass (or the appropriate environment variables). """ - - distro = "Fedora" vendor = "the Fedora Project" vendor_urls = [ ('Community Website', 'https://fedoraproject.org/'), ('Community Forums', 'https://discussion.fedoraproject.org/') ] + os_release_file = '/etc/fedora-release' + os_release_name = 'Fedora Linux' + os_release_id = 'fedora' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -628,16 +585,6 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote=''): - """This method checks to see if we are running on Fedora. It returns - True or False.""" - - if remote: - return cls.distro in remote - - return os.path.isfile('/etc/fedora-release') - def fedora_version(self): pkg = self.pkg_by_name("fedora-release") or \ self.package_manager.all_pkgs_by_name_regex( diff --git a/sos/policies/distros/rocky.py b/sos/policies/distros/rocky.py index c51f535cae..da046ed85f 100644 --- a/sos/policies/distros/rocky.py +++ b/sos/policies/distros/rocky.py @@ -8,17 +8,17 @@ # # See the LICENSE file in the source distribution for further information. 
-import os -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class RockyPolicy(RedHatPolicy): - distro = "Rocky Linux" vendor = "Rocky Enterprise Software Foundation" vendor_urls = [ ('Distribution Website', 'https://rockylinux.org'), ('Vendor Website', 'https://resf.org') ] + os_release_file = '/etc/rocky-release' + os_release_name = 'Rocky Linux' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -26,27 +26,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote=''): - if remote: - return cls.distro in remote - - # Return False if /etc/os-release is missing - if not os.path.exists(OS_RELEASE): - return False - - # Return False if /etc/rocky-release is missing - if not os.path.isfile('/etc/rocky-release'): - return False - - # If we've gotten this far, check for Rocky in - # /etc/os-release - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'Rocky Linux' in line: - return True - - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/suse.py b/sos/policies/distros/suse.py index 8c7900e55a..03fc36988b 100644 --- a/sos/policies/distros/suse.py +++ b/sos/policies/distros/suse.py @@ -17,7 +17,7 @@ class SuSEPolicy(LinuxPolicy): - distro = "SuSE" + os_release_name = "SuSE" vendor = "SuSE" vendor_urls = [('Distribution Website', 'https://www.suse.com/')] _tmp_dir = "/var/tmp" @@ -59,12 +59,13 @@ def get_local_name(self): class OpenSuSEPolicy(SuSEPolicy): - distro = "OpenSuSE" vendor = "SuSE" vendor_urls = [('Community Website', 'https://www.opensuse.org/')] + os_release_name = "OpenSuSE" + os_release_file = '/etc/SUSE-brand' msg = _("""\ This command will collect diagnostic and configuration \ -information from this %(distro)s system and installed \ +information from this %(os_release_name)s system and installed \ applications. An archive containing the collected information will be \ @@ -81,12 +82,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote): - """This method checks to see if we are running on SuSE. - """ - - if remote: - return cls.distro in remote - - return os.path.isfile('/etc/SUSE-brand') +# vim: set et ts=4 sw=4 : diff --git a/sos/policies/distros/ubuntu.py b/sos/policies/distros/ubuntu.py index 8d63c972d5..f241f4f90f 100644 --- a/sos/policies/distros/ubuntu.py +++ b/sos/policies/distros/ubuntu.py @@ -17,12 +17,13 @@ class UbuntuPolicy(DebianPolicy): - distro = "Ubuntu" vendor = "Canonical" vendor_urls = [ ('Community Website', 'https://www.ubuntu.com/'), ('Commercial Support', 'https://www.canonical.com') ] + os_release_name = 'Ubuntu' + os_release_file = '' PATH = "/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" \ + ":/usr/local/sbin:/usr/local/bin:/snap/bin" _upload_url = "https://files.support.canonical.com/uploads/" @@ -52,25 +53,11 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, self.valid_subclasses += [UbuntuPlugin] - @classmethod - def check(cls, remote=''): - """This method checks to see if we are running on Ubuntu. 
- It returns True or False.""" - - if remote: - return cls.distro in remote - - try: - with open('/etc/lsb-release', 'r') as fp: - return "Ubuntu" in fp.read() - except IOError: - return False - def dist_version(self): """ Returns the version stated in DISTRIB_RELEASE """ try: - with open('/etc/lsb-release', 'r') as fp: + with open('/etc/lsb-release', 'r', encoding='utf-8') as fp: lines = fp.readlines() for line in lines: if "DISTRIB_RELEASE" in line: @@ -79,17 +66,15 @@ def dist_version(self): except (IOError, ValueError): return False - def get_upload_https_auth(self): + def get_upload_https_auth(self, user=None, password=None): if self.upload_url.startswith(self._upload_url): return (self._upload_user, self._upload_password) - else: - return super().get_upload_https_auth() + return super().get_upload_https_auth() def get_upload_url_string(self): if self.upload_url.startswith(self._upload_url): return "Canonical Support File Server" - else: - return self.get_upload_url() + return self._get_obfuscated_upload_url(self.get_upload_url()) def get_upload_url(self): if not self.upload_url or self.upload_url.startswith(self._upload_url): diff --git a/sos/policies/distros/uniontechserver.py b/sos/policies/distros/uniontechserver.py index 29163dbf4e..68cdf45074 100644 --- a/sos/policies/distros/uniontechserver.py +++ b/sos/policies/distros/uniontechserver.py @@ -6,14 +6,14 @@ # # See the LICENSE file in the source distribution for further information. -import os -from sos.policies.distros.redhat import RedHatPolicy, OS_RELEASE +from sos.policies.distros.redhat import RedHatPolicy class UnionTechPolicy(RedHatPolicy): - distro = "UnionTech OS Server" vendor = "The UnionTech Project" vendor_urls = [('Distribution Website', 'https://www.chinauos.com/')] + os_release_name = 'UnionTech OS Server' + os_release_file = '' def __init__(self, sysroot=None, init=None, probe_runtime=True, remote_exec=None): @@ -21,20 +21,4 @@ def __init__(self, sysroot=None, init=None, probe_runtime=True, probe_runtime=probe_runtime, remote_exec=remote_exec) - @classmethod - def check(cls, remote=''): - - if remote: - return cls.distro in remote - - if not os.path.exists(OS_RELEASE): - return False - - with open(OS_RELEASE, 'r') as f: - for line in f: - if line.startswith('NAME'): - if 'UnionTech OS Server' in line: - return True - return False - # vim: set et ts=4 sw=4 : diff --git a/sos/policies/init_systems/__init__.py b/sos/policies/init_systems/__init__.py index 0c6a54ea01..aa9cbfc9fa 100644 --- a/sos/policies/init_systems/__init__.py +++ b/sos/policies/init_systems/__init__.py @@ -83,6 +83,7 @@ def is_service(self, name): """ return name in self.services + # pylint: disable=unused-argument def is_running(self, name, default=True): """Checks if the given service name is in a running state. 
@@ -111,7 +112,7 @@ def load_all_services(self):
         This must be overridden by anything that subclasses `InitSystem` in
         order for service methods to function properly
         """
-        pass
+        raise NotImplementedError
 
     def _query_service(self, name):
         """Query an individual service"""
@@ -148,7 +149,7 @@ def get_service_names(self, regex):
         :type regex: ``str``
         """
         reg = re.compile(regex, re.I)
-        return [s for s in self.services.keys() if reg.match(s)]
+        return [s for s in self.services if reg.match(s)]
 
     def get_service_status(self, name):
         """Get the status for the given service name along with the output
diff --git a/sos/policies/package_managers/__init__.py b/sos/policies/package_managers/__init__.py
index acea40b050..d5518006d9 100644
--- a/sos/policies/package_managers/__init__.py
+++ b/sos/policies/package_managers/__init__.py
@@ -69,7 +69,7 @@ def packages(self):
 
     @property
     def manager_name(self):
-        return self.__class__.__name__.lower().split('package')[0]
+        return self.__class__.__name__.lower().split('package', maxsplit=1)[0]
 
     def exec_cmd(self, command, timeout=30, need_root=False, env=None,
                  use_shell=False, chroot=None):
@@ -137,7 +137,7 @@ def all_pkgs_by_name_regex(self, regex_name, flags=0):
         :rtype: ``list``
         """
         reg = re.compile(regex_name, flags)
-        return [pkg for pkg in self.packages.keys() if reg.match(pkg)]
+        return [pkg for pkg in self.packages if reg.match(pkg)]
 
     def pkg_by_name(self, name):
         """
@@ -274,9 +274,9 @@ def build_verify_command(self, packages):
         verify_packages = ""
         for package_list in verify_list:
             for package in package_list:
-                if any([f in package for f in self.verify_filter]):
+                if any(f in package for f in self.verify_filter):
                     continue
-                if len(verify_packages):
+                if verify_packages:
                     verify_packages += " "
                 verify_packages += package
         return self.verify_command + " " + verify_packages
@@ -335,6 +335,23 @@ def __init__(self, primary, fallbacks, chroot=None, remote_exec=None):
         self._managers = [self.primary]
         self._managers.extend(self.fallbacks)
 
+    def _parse_pkg_list(self, pkg_list):
+        """
+        Using the output of `query_command`, build the _packages dict.
+
+        This should be overridden by each concrete package manager and act as
+        a generator consumed by _generate_pkg_list, which inserts the parsed
+        packages into the _packages dict.
+
+        This method should yield a tuple of name, version, release for each
+        package parsed. If the package manager or distribution does not use a
+        release field, set it to None.
+
+        :param pkg_list: The output of `query_command`
+        :type pkg_list: ``str``
+        """
+        raise NotImplementedError
+
     def all_files(self):
         if not self.files:
             for pm in self._managers:
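# A minimal sketch of the _parse_pkg_list() contract documented above,
# assuming a hypothetical package manager whose query_command emits one
# "name|version|release" line per package. The class name, command, and
# field layout are illustrative assumptions, not part of this patch.

from sos.policies.package_managers import PackageManager


class PipeDelimitedPackageManager(PackageManager):
    """Hypothetical manager for a pipe-delimited package listing."""

    query_command = "examplepkg list --format '%{name}|%{version}|%{release}'"

    def _parse_pkg_list(self, pkg_list):
        # Yield (name, version, release) for each line of query_command
        # output; release may be None if the distribution does not use one.
        for line in pkg_list.splitlines():
            name, version, release = line.split('|', 2)
            yield (name, version, release or None)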
diff --git a/sos/presets/__init__.py b/sos/presets/__init__.py
index 4764d926ea..8c34aea461 100644
--- a/sos/presets/__init__.py
+++ b/sos/presets/__init__.py
@@ -90,7 +90,8 @@ def write(self, presets_path):
         if not os.path.exists(presets_path):
             os.makedirs(presets_path, mode=0o755)
 
-        with open(os.path.join(presets_path, self.name), "w") as pfile:
+        with open(os.path.join(presets_path, self.name), "w",
+                  encoding='utf-8') as pfile:
             json.dump(pdict, pfile)
 
     def delete(self, presets_path):
diff --git a/sos/presets/redhat/__init__.py b/sos/presets/redhat/__init__.py
index 9f6c44d560..9b2662eb59 100644
--- a/sos/presets/redhat/__init__.py
+++ b/sos/presets/redhat/__init__.py
@@ -80,7 +80,7 @@
     verify=True, all_logs=True, profiles=_cb_profiles, plugopts=_cb_plugopts
 )
 
-CB_NOTE = ("Data collection will be limited to a boot-affecting scope")
+CB_NOTE = "Data collection will be limited to a boot-affecting scope"
 
 NOTE_SIZE = "This preset may increase report size"
 NOTE_TIME = "This preset may increase report run time"
diff --git a/sos/report/__init__.py b/sos/report/__init__.py
index 6f96edd9bd..444880901d 100644
--- a/sos/report/__init__.py
+++ b/sos/report/__init__.py
@@ -20,8 +20,9 @@
 from datetime import datetime
 import glob
 
+from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import TimeoutError as FuturesTimeoutError
 from shutil import rmtree
-from concurrent.futures import ThreadPoolExecutor, TimeoutError
 
 import sos.report.plugins
 from sos.utilities import (ImporterHelper, SoSTimeoutError, bold,
@@ -439,8 +440,8 @@ def display_help(cls, section):
             'policies': 'How sos operates on different distributions'
         }
         helpln = ''
-        for ln in help_lines:
-            ssec.add_text(f"\t{ln:<36}{help_lines[ln]}", newline=False)
+        for ln, value in help_lines.items():
+            ssec.add_text(f"\t{ln:<36}{value}", newline=False)
         ssec.add_text(helpln)
 
     def print_header(self):
@@ -779,16 +780,16 @@ def _add_sos_logs(self):
 
     def _is_in_profile(self, plugin_class):
         only_plugins = self.opts.only_plugins
-        if not len(self.opts.profiles):
+        if not self.opts.profiles:
             return True
         if not hasattr(plugin_class, "profiles"):
             return False
         if only_plugins and not self._is_not_specified(plugin_class.name()):
             return True
-        return any([p in self.opts.profiles for p in plugin_class.profiles])
+        return any(p in self.opts.profiles for p in plugin_class.profiles)
 
     def _is_skipped(self, plugin_name):
-        return (plugin_name in self.opts.skip_plugins)
+        return plugin_name in self.opts.skip_plugins
 
     def _is_inactive(self, plugin_name, pluginClass):
         return (not pluginClass(self.get_commons()).check_enabled() and
@@ -835,10 +836,10 @@ def load_plugins(self):
 
         # validate and load plugins
         for plug in plugins:
-            plugbase, ext = os.path.splitext(plug)
+            plugbase, __ = os.path.splitext(plug)
             try:
                 plugin_classes = import_plugin(plugbase, valid_plugin_classes)
-                if not len(plugin_classes):
+                if not plugin_classes:
                     # no valid plugin classes for this policy
                     continue
 
@@ -847,7 +848,7 @@ def load_plugins(self):
                 if not validate_plugin(plugin_class,
                                        experimental=self.opts.experimental):
                     self.soslog.warning(
-                        _("plugin %s does not validate, skipping") % plug)
+                        _(f"plugin {plug} does not validate, skipping"))
                     if self.opts.verbosity > 0:
                         self._skip(plugin_class, _("does not validate"))
                         continue
@@ -887,18 +888,18 @@
remaining_profiles.remove(i) self._load(plugin_class) except Exception as e: - self.soslog.warning(_("plugin %s does not install, " - "skipping: %s") % (plug, e)) + self.soslog.warning(_(f"plugin {plug} does not install, " + f"skipping: {e}")) self.handle_exception() if len(remaining_profiles) > 0: - self.soslog.error(_("Unknown or inactive profile(s) provided:" - " %s") % ", ".join(remaining_profiles)) + self.soslog.error(_('Unknown or inactive profile(s) provided:' + f' {", ".join(remaining_profiles)}')) self.list_profiles() self._exit(1) def _set_all_options(self): if self.opts.alloptions: - for plugname, plug in self.loaded_plugins: + for __, plug in self.loaded_plugins: for opt in plug.options.values(): if bool in opt.val_type: opt.value = True @@ -954,7 +955,7 @@ def _set_tunables(self): self.soslog.error(err) self._exit(1) del opts[plugname] - for plugname in opts.keys(): + for plugname in opts: self.soslog.error('WARNING: unable to set option for disabled ' f'or non-existing plugin ({plugname}).') # in case we printed warnings above, visually intend them from @@ -978,7 +979,7 @@ def _check_for_unknown_plugins(self): ) def _set_plugin_options(self): - for plugin_name, plugin in self.loaded_plugins: + for __, plugin in self.loaded_plugins: for opt in plugin.options: self.all_options.append(plugin.options[opt]) @@ -1011,7 +1012,7 @@ def _set_estimate_only(self): def _report_profiles_and_plugins(self): self.ui_log.info("") - if len(self.loaded_plugins): + if self.loaded_plugins: self.ui_log.info(f" {len(self.profiles)} profiles, " f"{len(self.loaded_plugins)} plugins") else: @@ -1028,7 +1029,7 @@ def list_plugins(self): self.ui_log.info(_("The following plugins are currently enabled:")) self.ui_log.info("") for (plugname, plug) in self.loaded_plugins: - self.ui_log.info(f"{plugname:<20} {plug.get_description()}") + self.ui_log.info(f" {plugname:<20} {plug.get_description()}") else: self.ui_log.info(_("No plugin enabled.")) self.ui_log.info("") @@ -1038,7 +1039,7 @@ def list_plugins(self): "disabled:")) self.ui_log.info("") for (plugname, plugclass, reason) in self.skipped_plugins: - self.ui_log.info(f"{plugname:<20} {reason:<14} " + self.ui_log.info(f" {plugname:<20} {reason:<14} " f"{plugclass.get_description()}") self.ui_log.info("") @@ -1059,7 +1060,7 @@ def list_plugins(self): val = TIMEOUT_DEFAULT if opt.name == 'postproc': val = not self.opts.no_postproc - self.ui_log.info(f"{opt.name:<25} {val:<15} {opt.desc}") + self.ui_log.info(f" {opt.name:<25} {val:<15} {opt.desc}") self.ui_log.info("") self.ui_log.info(_("The following plugin options are available:")) @@ -1125,14 +1126,14 @@ def list_presets(self): if not preset: continue preset = self.policy.find_preset(preset) - self.ui_log.info(f"name: {preset.name:>14}") - self.ui_log.info(f"description: {preset.desc:>14}") + self.ui_log.info(f"{'name:':>14} {preset.name}") + self.ui_log.info(f"{'description:':>14} {preset.desc}") if preset.note: - self.ui_log.info(f"note: {preset.note:>14}") + self.ui_log.info(f"{'note:':>14} {preset.note}") if self.opts.verbosity > 0: args = preset.opts.to_args() - options_str = f"{'options:':>14}" + options_str = f"{'options:':>14} " lines = _format_list(options_str, args, indent=True, sep=' ') for line in lines: self.ui_log.info(line) @@ -1181,7 +1182,7 @@ def del_preset(self, name): try: policy.del_preset(name=name) except Exception as e: - self.ui_log.error(str(e) + "\n") + self.ui_log.error(f"{str(e)}\n") return False self.ui_log.info(f"Deleted preset '{name}'\n") @@ -1256,7 +1257,7 @@ def 
setup(self): plug.manifest.add_field('setup_end', end) plug.manifest.add_field('setup_time', end - start) except KeyboardInterrupt: - raise + raise KeyboardInterrupt # pylint: disable=raise-missing-from except (OSError, IOError) as e: if e.errno in fatal_fs_errors: self.ui_log.error("") @@ -1318,7 +1319,7 @@ def _collect_plugin(self, plugin): end = datetime.now() _plug.manifest.add_field('end_time', end) _plug.manifest.add_field('run_time', end - start) - except TimeoutError: + except FuturesTimeoutError: msg = f"Plugin {plugin[1]} timed out" # log to ui_log.error to show the user, log to soslog.info # so that someone investigating the sos execution has it all @@ -1371,13 +1372,14 @@ def collect_plugin(self, plugin): self.pluglist.remove(plugin) except ValueError: self.soslog.debug( - f"Could not remove {plugin} from plugin list, ignoring..." + f"Could not remove {plugname} from plugin list, " + f"ignoring..." ) try: self.running_plugs.remove(plugname) except ValueError: self.soslog.debug( - f"Could not remove {plugin} from running plugin list, " + f"Could not remove {plugname} from running plugin list, " f"ignoring..." ) status = '' @@ -1449,7 +1451,7 @@ def generate_reports(self): cmd['file'] ))) - for content, f, tags in plug.copy_strings: + for __, f, __ in plug.copy_strings: section.add(CreatedFile(name=f, href=os.path.join("..", f))) @@ -1504,24 +1506,22 @@ def _create_checksum(self, archive, hash_name): try: hash_size = 1024**2 # Hash 1MiB of content at a time. - archive_fp = open(archive, 'rb') digest = hashlib.new(hash_name) - while True: - hashdata = archive_fp.read(hash_size) - if not hashdata: - break - digest.update(hashdata) - archive_fp.close() + with open(archive, 'rb') as archive_fp: + while True: + hashdata = archive_fp.read(hash_size) + if not hashdata: + break + digest.update(hashdata) except Exception: self.handle_exception() return digest.hexdigest() def _write_checksum(self, archive, hash_name, checksum): # store checksum into file - fp = open(archive + "." + hash_name, "w") - if checksum: - fp.write(checksum + "\n") - fp.close() + with open(archive + "." 
+ hash_name, "w", encoding='utf-8') as fp:
+            if checksum:
+                fp.write(checksum + "\n")
 
     def final_work(self):
         archive = None  # archive path
@@ -1582,7 +1582,7 @@ def final_work(self):
             # that still will be moved to the sos report final directory path
             tmpdir_path = Path(self.tmpdir)
             self.estimated_plugsizes['sos_logs_reports'] = sum(
-                [f.lstat().st_size for f in tmpdir_path.glob('**/*')])
+                f.lstat().st_size for f in tmpdir_path.glob('**/*'))
 
             _sum = get_human_readable(sum(self.estimated_plugsizes.values()))
             self.ui_log.info("Estimated disk space requirement for whole "
@@ -1621,8 +1621,7 @@ def final_work(self):
                 except Exception:
                     if self.opts.debug:
                         raise
-                    else:
-                        return False
+                    return False
                 finally:
                     os.umask(old_umask)
         else:
@@ -1796,8 +1795,8 @@ def compile_tags(ent, key='filepath'):
 
     def _merge_preset_options(self):
         # Log command line options
-        msg = "[%s:%s] executing 'sos %s'"
-        self.soslog.info(msg % (__name__, "setup", " ".join(self.cmdline)))
+        self.soslog.info(f"[{__name__}:setup] executing "
+                         f"'sos {' '.join(self.cmdline)}'")
 
         # Log active preset defaults
         preset_args = self.preset.opts.to_args()
@@ -1854,20 +1853,22 @@ def execute(self):
                 self.version()
 
             return self.final_work()
-        except (OSError):
+        except OSError:
             if self.opts.debug:
                 raise
             if not os.getenv('SOS_TEST_LOGS', None) == 'keep':
                 self.cleanup()
-        except (KeyboardInterrupt):
+        except KeyboardInterrupt:
             self.ui_log.error("\nExiting on user cancel")
             self.cleanup()
             self._exit(130)
-        except (SystemExit) as e:
+        except SystemExit as e:
             if not os.getenv('SOS_TEST_LOGS', None) == 'keep':
                 self.cleanup()
             sys.exit(e.code)
 
         self._exit(1)
+        # Never gets here. This is to fix "inconsistent-return-statements"
+        return False
 
 # vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/__init__.py b/sos/report/plugins/__init__.py
index 07df31871b..9b54c68a12 100644
--- a/sos/report/plugins/__init__.py
+++ b/sos/report/plugins/__init__.py
@@ -71,7 +71,7 @@ def _node_type(st):
 _cert_replace = "-----SCRUBBED"
 
 
-class SoSPredicate(object):
+class SoSPredicate:
     """A class to implement collection predicates.
 
     A predicate gates the collection of data by an sos plugin. For any
@@ -185,9 +185,9 @@ def _check_required_state(self, items, required):
         """
         if required == 'any':
             return any(items)
-        elif required == 'all':
+        if required == 'all':
             return all(items)
-        elif required == 'none':
+        if required == 'none':
             return not any(items)
         raise ValueError(
             f"predicate requires must be 'any', 'all', or 'none' "
@@ -374,7 +374,7 @@ def __init__(self, owner, dry_run=False, kmods=[], services=[],
     }
 
 
-class SoSCommand(object):
+class SoSCommand:
     """A class to represent a command to be collected.
 A SoSCommand() object is instantiated for each command handed to an
@@ -459,7 +459,7 @@ def set_value(self, val):
         if type('') in self.val_type:
             self.value = str(val)
             return
-        if not any([type(val) is _t for _t in self.val_type]):
+        if not any(isinstance(val, _t) for _t in self.val_type):
             valid = []
             for t in self.val_type:
                 if t is None:
@@ -576,7 +576,7 @@ def __init__(self, commons):
 
         # add the default plugin opts
         self.options.update(self.get_default_plugin_opts())
-        for popt in self.options:
+        for popt in self.options:  # pylint: disable=consider-using-dict-items
             self.options[popt].plugin = self.name()
         for opt in self.option_list:
             opt.plugin = self.name()
@@ -1313,6 +1313,21 @@ def do_file_sub(self, srcpath, regexp, subst):
             replacements = 0
         return replacements
 
+    def do_paths_http_sub(self, pathspecs):
+        """ Obfuscate credentials in *_PROXY variables in all files in the
+        given list. A proxy setting without a protocol is ignored, since
+        that is not a recommended setting and obfuscating it can hit false
+        positives.
+
+        :param pathspecs: The filepath(s) to obfuscate credentials in
+        :type pathspecs: ``str`` or a ``list`` of strings
+        """
+        if isinstance(pathspecs, str):
+            pathspecs = [pathspecs]
+        for path in pathspecs:
+            self.do_path_regex_sub(
+                path, r"(http(s)?://)\S+:\S+(@.*)", r"\1******:******\3")
+
     def do_path_regex_sub(self, pathexp, regexp, subst):
         """Apply a regexp substituation to a set of files archived by
         sos. The set of files to be substituted is generated by matching
@@ -1344,6 +1359,11 @@ def _copy_symlink(self, srcpath):
         # Absolute path to the link target. If SYSROOT != '/' this path
         # is relative to the host root file system.
         absdest = os.path.normpath(dest)
+        if self._is_skipped_path(absdest):
+            self._log_debug(f"skipping excluded path '{absdest}' as symlink "
+                            f"destination from {srcpath}")
+            return
+
         # adjust the target used inside the report to always be relative
         if os.path.isabs(linkdest):
             # Canonicalize the link target path to avoid additional levels
@@ -1389,7 +1409,7 @@ def _copy_symlink(self, srcpath):
         self._log_debug(f"normalized link target '{linkdest}' as '{absdest}'")
 
         # skip recursive copying of symlink pointing to itself.
- if (absdest != srcpath): + if absdest != srcpath: # this allows for ensuring we collect the host's file when copying # a symlink from within a container that is within the set sysroot force = (absdest.startswith(self.sysroot) and @@ -1431,9 +1451,9 @@ def _is_forbidden_path(self, path): ) def _is_policy_forbidden_path(self, path): - return any([ + return any( fnmatch.fnmatch(path, fp) for fp in self.policy.forbidden_paths - ]) + ) def _is_skipped_path(self, path): """Check if the given path matches a user-provided specification to @@ -1483,14 +1503,13 @@ def _do_copy_path(self, srcpath, dest=None, force=False): if stat.S_ISLNK(st.st_mode): self._copy_symlink(srcpath) return None - else: - if stat.S_ISDIR(st.st_mode) and os.access(srcpath, os.R_OK): - # copy empty directory - if not self.listdir(srcpath): - self.archive.add_dir(dest) - return None - self._copy_dir(srcpath) + if stat.S_ISDIR(st.st_mode) and os.access(srcpath, os.R_OK): + # copy empty directory + if not self.listdir(srcpath): + self.archive.add_dir(dest) return None + self._copy_dir(srcpath) + return None # handle special nodes (block, char, fifo, socket) if not (stat.S_ISREG(st.st_mode) or stat.S_ISDIR(st.st_mode)): @@ -1631,11 +1650,11 @@ def generate_copyspec_tags(self): """After file collections have completed, retroactively generate manifest entries to apply tags to files copied by generic copyspecs """ - for file_regex in self.filetags: + for file_regex, tag in self.filetags.items(): manifest_data = { 'specification': file_regex, 'files_copied': [], - 'tags': self.filetags[file_regex] + 'tags': tag } matched_files = [] for cfile in self.copied_files: @@ -1733,6 +1752,29 @@ def get_filename_tag(fname): return _fname.replace('.', '_') return None + def getmtime(path): + """ Files should be sorted in most-recently-modified order, so + that we collect the newest data first before reaching the limit.""" + try: + return os.path.getmtime(path) + except OSError: + return 0 + + def time_filter(path): + """ When --since is passed, or maxage is coming from the + plugin, we need to filter out older files """ + + # skip config files or not-logarchive files from the filter + if ((logarchive_pattern.search(path) is None) or + (configfile_pattern.search(path) is not None)): + return True + filetime = getmtime(path) + filedatetime = datetime.fromtimestamp(filetime) + if ((since and filedatetime < since) or + (maxage and (time()-filetime < maxage*3600))): + return False + return True + for copyspec in copyspecs: if not (copyspec and len(copyspec)): return False @@ -1798,31 +1840,8 @@ def get_filename_tag(fname): # operations continue - # Files should be sorted in most-recently-modified order, so that - # we collect the newest data first before reaching the limit. 
- def getmtime(path): - try: - return os.path.getmtime(path) - except OSError: - return 0 - - def time_filter(path): - """ When --since is passed, or maxage is coming from the - plugin, we need to filter out older files """ - - # skip config files or not-logarchive files from the filter - if ((logarchive_pattern.search(path) is None) or - (configfile_pattern.search(path) is not None)): - return True - filetime = getmtime(path) - filedatetime = datetime.fromtimestamp(filetime) - if ((since and filedatetime < since) or - (maxage and (time()-filetime < maxage*3600))): - return False - return True - if since or maxage: - files = list(filter(lambda f: time_filter(f), files)) + files = list(filter(time_filter, files)) files.sort(key=getmtime, reverse=True) current_size = 0 @@ -2008,10 +2027,13 @@ def _add_cmd_output(self, **kwargs): kwargs['priority'] = 10 if 'changes' not in kwargs: kwargs['changes'] = False - if self.get_option('all_logs') or kwargs['sizelimit'] == 0: + if (not getattr(SoSCommand(**kwargs), "snap_cmd", False) and + (self.get_option('all_logs') or kwargs['sizelimit'] == 0)): kwargs['to_file'] = True + if "snap_cmd" in kwargs: + kwargs.pop("snap_cmd") soscmd = SoSCommand(**kwargs) - self._log_debug("packed command: " + soscmd.__str__()) + self._log_debug(f"packed command: {str(soscmd)}") for _skip_cmd in self.skip_commands: # This probably seems weird to be doing filename matching on the # commands, however we want to remain consistent with our regex @@ -2056,9 +2078,9 @@ def add_dir_listing(self, paths, tree=False, recursive=False, chroot=True, paths = [p for p in paths if self.path_exists(p)] if not tree: - options = f"alhZ{'R' if recursive else ''}" + options = f"alZ{'R' if recursive else ''}" else: - options = 'lhp' + options = 'lp' for path in paths: self.add_cmd_output( @@ -2074,7 +2096,7 @@ def add_cmd_output(self, cmds, suggest_filename=None, sizelimit=None, pred=None, subdir=None, changes=False, foreground=False, tags=[], priority=10, cmd_as_tag=False, container=None, - to_file=False, runas=None): + to_file=False, runas=None, snap_cmd=False): """Run a program or a list of programs and collect the output Output will be limited to `sizelimit`, collecting the last X amount @@ -2150,6 +2172,9 @@ def add_cmd_output(self, cmds, suggest_filename=None, :param runas: Run the `cmd` as the `runas` user :type runas: ``str`` + + :param snap_cmd: Are the commands being run from a snap? 
+ :type snap_cmd: ``bool`` """ if isinstance(cmds, str): cmds = [cmds] @@ -2178,7 +2203,7 @@ def add_cmd_output(self, cmds, suggest_filename=None, changes=changes, foreground=foreground, priority=priority, cmd_as_tag=cmd_as_tag, to_file=to_file, container_cmd=container_cmd, - runas=runas) + runas=runas, snap_cmd=snap_cmd) def add_cmd_tags(self, tagdict): """Retroactively add tags to any commands that have been run by this @@ -2808,8 +2833,7 @@ def get_containers(self, runtime=None, get_all=False): if _runtime is not None: if get_all: return _runtime.get_containers(get_all=True) - else: - return _runtime.containers + return _runtime.containers return [] def get_container_images(self, runtime=None): @@ -3152,7 +3176,7 @@ def _collect_container_copy_specs(self): def _collect_cmds(self): self.collect_cmds.sort(key=lambda x: x.priority) for soscmd in self.collect_cmds: - self._log_debug("unpacked command: " + soscmd.__str__()) + self._log_debug(f"unpacked command: {str(soscmd)}") user = "" if getattr(soscmd, "runas", None) is not None: user = f", as the {soscmd.runas} user" @@ -3212,7 +3236,6 @@ def collect(self): are more likely to be interrupted by timeouts than file or command output collections. """ - pass @contextlib.contextmanager def collection_file(self, fname, subdir=None, tags=[]): @@ -3237,7 +3260,7 @@ def collection_file(self, fname, subdir=None, tags=[]): _pfname = self._make_command_filename(fname, subdir=subdir) self.archive.check_path(_pfname, P_FILE) _name = self.archive.dest_path(_pfname) - with open(_name, 'w') as _file: + with open(_name, 'w', encoding='utf-8') as _file: self._log_debug(f"manual collection file opened: {_name}") yield _file end = time() @@ -3372,7 +3395,7 @@ def setup(self): self.add_copy_spec(list(self.files)) def setup_verify(self): - if not hasattr(self, "verify_packages") or not self.verify_packages: + if not hasattr(self, "verify_packages"): if hasattr(self, "packages") and self.packages: # Limit automatic verification to only the named packages self.verify_packages = [p + "$" for p in self.packages] @@ -3466,7 +3489,6 @@ def path_join(self, path, *p): def postproc(self): """Perform any postprocessing. To be replaced by a plugin if required. """ - pass def check_process_by_name(self, process): """Checks if a named process is found in /proc/[0-9]*/cmdline. @@ -3482,7 +3504,8 @@ def check_process_by_name(self, process): try: cmd_line_paths = glob.glob(cmd_line_glob) for path in cmd_line_paths: - with open(self.path_join(path), 'r') as pfile: + with open(self.path_join(path), 'r', + encoding='utf-8') as pfile: cmd_line = pfile.read().strip() if process in cmd_line: status = True @@ -3504,7 +3527,7 @@ def get_process_pids(self, process): cmd_line_paths = glob.glob(cmd_line_glob) for path in cmd_line_paths: try: - with open(path, 'r') as f: + with open(path, 'r', encoding='utf-8') as f: cmd_line = f.read().strip() if process in cmd_line: pids.append(path.split("/")[2]) @@ -3523,6 +3546,7 @@ def filter_namespaces(self, ns_list, ns_pattern=None, ns_max=None): namespaces (options originally present in the networking plugin.) 
""" out_ns = [] + pattern = None # Regex initialization outside of for loop if ns_pattern: @@ -3552,52 +3576,42 @@ class PluginDistroTag(): Use IndependentPlugin for plugins that are distribution agnostic """ - pass class RedHatPlugin(PluginDistroTag): """Tagging class for Red Hat's Linux distributions""" - pass class UbuntuPlugin(PluginDistroTag): """Tagging class for Ubuntu Linux""" - pass class DebianPlugin(PluginDistroTag): """Tagging class for Debian Linux""" - pass class SuSEPlugin(PluginDistroTag): """Tagging class for SuSE Linux distributions""" - pass class OpenEulerPlugin(PluginDistroTag): """Tagging class for openEuler linux distributions""" - pass class CosPlugin(PluginDistroTag): """Tagging class for Container-Optimized OS""" - pass class IndependentPlugin(PluginDistroTag): """Tagging class for plugins that can run on any platform""" - pass class ExperimentalPlugin(PluginDistroTag): """Tagging class that indicates that this plugin is experimental""" - pass class AzurePlugin(PluginDistroTag): """Tagging class for Azure Linux""" - pass def import_plugin(name, superclasses=None): diff --git a/sos/report/plugins/anaconda.py b/sos/report/plugins/anaconda.py index 78577d3f7e..77f54d650a 100644 --- a/sos/report/plugins/anaconda.py +++ b/sos/report/plugins/anaconda.py @@ -24,21 +24,21 @@ class Anaconda(Plugin, RedHatPlugin): def setup(self): - paths = [ + self.copypaths = [ "/root/anaconda-ks.cfg" ] if self.path_isdir('/var/log/anaconda'): # new anaconda - paths.append('/var/log/anaconda') + self.copypaths.append('/var/log/anaconda') else: - paths = paths + [ + self.copypaths = self.copypaths + [ "/var/log/anaconda.*", "/root/install.log", "/root/install.log.syslog" ] - self.add_copy_spec(paths) + self.add_copy_spec(self.copypaths) def postproc(self): self.do_file_sub( @@ -51,5 +51,6 @@ def postproc(self): r"(user.*--password=*\s*)\s*(\S*)", r"\1********" ) + self.do_paths_http_sub(self.copypaths) # vim: set et ts=4 sw=4 : diff --git a/sos/report/plugins/apt.py b/sos/report/plugins/apt.py index 857a11b6fe..464cfb983f 100644 --- a/sos/report/plugins/apt.py +++ b/sos/report/plugins/apt.py @@ -48,19 +48,11 @@ def setup(self): def postproc(self): super().postproc() - common_regex = r"(http(s)?://)\S+:\S+(@.*)" - common_replace = r"\1******:******\3" - - files_to_sub = [ + self.do_paths_http_sub([ "/etc/apt/sources.list", "/etc/apt/sources.list.d/", "/etc/apt/apt.conf", "/etc/apt/apt.conf.d/", - ] - - for file in files_to_sub: - self.do_path_regex_sub( - file, common_regex, common_replace - ) + ]) # vim: set et ts=4 sw=4 : diff --git a/sos/report/plugins/bird.py b/sos/report/plugins/bird.py new file mode 100644 index 0000000000..60ef89c88d --- /dev/null +++ b/sos/report/plugins/bird.py @@ -0,0 +1,70 @@ +# Copyright (C) 2024 Jake Hunsaker +# Copyright (C) 2019 Alexander Petrovskiy +# +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, IndependentPlugin + + +class Bird(Plugin, IndependentPlugin): + """BIRD is an Internet Routing Daemon used in many *nix and nix-like + distributions. This plugin will capture the configuration files for a local + bird installation, as well as runtime information and metrics. 
diff --git a/sos/report/plugins/bird.py b/sos/report/plugins/bird.py
new file mode 100644
index 0000000000..60ef89c88d
--- /dev/null
+++ b/sos/report/plugins/bird.py
@@ -0,0 +1,70 @@
+# Copyright (C) 2024 Jake Hunsaker
+# Copyright (C) 2019 Alexander Petrovskiy
+#
+# This file is part of the sos project: https://github.com/sosreport/sos
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions of
+# version 2 of the GNU General Public License.
+#
+# See the LICENSE file in the source distribution for further information.
+
+from sos.report.plugins import Plugin, IndependentPlugin
+
+
+class Bird(Plugin, IndependentPlugin):
+    """BIRD is an Internet Routing Daemon used in many *nix and *nix-like
+    distributions. This plugin will capture the configuration files for a
+    local bird installation, as well as runtime information and metrics.
+    """
+
+    plugin_name = 'bird'
+    profiles = ('network', )
+    packages = ('bird', )
+    services = ('bird', )
+
+    def setup(self):
+
+        try:
+            with open('/etc/bird.conf', 'r', encoding='utf-8') as bfile:
+                for line in bfile:
+                    if line.startswith('log'):
+                        # non-file values will be dropped by add_copy_spec()
+                        self.add_copy_spec(line.split()[1].strip('"'))
+        except Exception as err:
+            self._log_debug(f"Unable to parse bird.conf: {err}")
+
+        self.add_copy_spec([
+            "/etc/bird/*",
+            "/etc/bird.conf"
+        ])
+
+        self.add_cmd_output([
+            "birdc show status",
+            "birdc show memory",
+            "birdc show protocols all",
+            "birdc show interfaces",
+            "birdc show route all",
+            "birdc show symbols",
+            "birdc show bfd sessions",
+            "birdc show babel interfaces",
+            "birdc show babel neighbors",
+            "birdc show babel entries",
+            "birdc show babel routes",
+            "birdc show ospf",
+            "birdc show ospf neighbors",
+            "birdc show ospf interface",
+            "birdc show ospf topology",
+            "birdc show ospf state all",
+            "birdc show ospf lsadb",
+            "birdc show rip interfaces",
+            "birdc show rip neighbors",
+            "birdc show static"
+        ])
+
+    def postproc(self):
+        self.do_path_regex_sub('/etc/bird(.*)?.conf',
+                               r"((.*password)\s\"(.*)\"(.*))",
+                               r"\2 *******\4")
+
+# vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/block.py b/sos/report/plugins/block.py
index 5085fc8857..7c564bd3d7 100644
--- a/sos/report/plugins/block.py
+++ b/sos/report/plugins/block.py
@@ -67,5 +67,6 @@ def setup(self):
                 if 'crypto_LUKS' in line:
                     dev = line.split()[0]
                     self.add_cmd_output(f'cryptsetup luksDump /dev/{dev}')
+                    self.add_cmd_output(f'clevis luks list -d /dev/{dev}')
 
 # vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/bootc.py b/sos/report/plugins/bootc.py
new file mode 100644
index 0000000000..4be6538b0c
--- /dev/null
+++ b/sos/report/plugins/bootc.py
@@ -0,0 +1,32 @@
+# Copyright (C) 2024 Red Hat, Inc., Jose Castillo
+# This file is part of the sos project: https://github.com/sosreport/sos
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions of
+# version 2 of the GNU General Public License.
+#
+# See the LICENSE file in the source distribution for further information.
+ +from sos.report.plugins import Plugin, RedHatPlugin + + +class Bootc(Plugin, RedHatPlugin): + + short_desc = 'Bootc' + + plugin_name = 'bootc' + profiles = ('system', 'sysmgmt', 'packagemanager',) + + packages = ('bootc',) + + def setup(self): + self.add_copy_spec([ + "/usr/lib/ostree/prepare-root.conf", + "/usr/lib/bootc/", + ]) + + self.add_cmd_output( + "bootc status", + ) + +# vim: set et ts=4 sw=4 : diff --git a/sos/report/plugins/ceph_mds.py b/sos/report/plugins/ceph_mds.py index 6a12a0af8d..abbf2ba644 100644 --- a/sos/report/plugins/ceph_mds.py +++ b/sos/report/plugins/ceph_mds.py @@ -75,7 +75,7 @@ def setup(self): "counter schema", "damage ls", "dump loads", - "dump tree", + "dump tree /", "dump_blocked_ops", "dump_historic_ops", "dump_historic_ops_by_duration", diff --git a/sos/report/plugins/ceph_mgr.py b/sos/report/plugins/ceph_mgr.py index 3a2b0c8a2a..e89606d58c 100644 --- a/sos/report/plugins/ceph_mgr.py +++ b/sos/report/plugins/ceph_mgr.py @@ -47,12 +47,15 @@ def setup(self): microceph_pkg = self.policy.package_manager.pkg_by_name('microceph') ceph_mgr_cmds = ([ - "balancer status", + "balancer eval", + "balancer status", # For pre-Squid releases + "balancer status detail", "healthcheck history ls", "log last cephadm", "mgr dump", "mgr metadata", "mgr module ls", + "mgr services", "mgr stat", "mgr versions" ]) diff --git a/sos/report/plugins/ceph_mon.py b/sos/report/plugins/ceph_mon.py index 8980e5d494..d73927203a 100644 --- a/sos/report/plugins/ceph_mon.py +++ b/sos/report/plugins/ceph_mon.py @@ -106,6 +106,7 @@ def setup(self): "ceph config generate-minimal-conf", "ceph config log", "ceph config-key dump", + "ceph crash ls", "ceph crash stat", "ceph features", "ceph health detail", @@ -118,11 +119,19 @@ def setup(self): "ceph mgr services", "ceph mgr versions", "ceph mon stat", + "ceph mon features ls", + "ceph node ls", + "ceph osd crush class ls", "ceph osd crush dump", + "ceph osd crush rule ls", "ceph osd crush show-tunables", "ceph osd crush tree --show-shadow", "ceph osd erasure-code-profile ls", "ceph osd metadata", + "ceph osd utilization", + "ceph telemetry channel ls", + "ceph telemetry collection ls", + "ceph telemetry ls", "ceph quorum_status", "ceph versions", "ceph-disk list", @@ -141,9 +150,11 @@ def setup(self): "df", "fs dump", "fs ls", + "fs status", "mds stat", "mon dump", "osd blocked-by", + "osd blocklist ls", "osd df tree", "osd df", "osd dump", diff --git a/sos/report/plugins/ceph_osd.py b/sos/report/plugins/ceph_osd.py index 7172697b54..843e60044a 100644 --- a/sos/report/plugins/ceph_osd.py +++ b/sos/report/plugins/ceph_osd.py @@ -46,6 +46,7 @@ def setup(self): "dump_reservations", # will work quincy onward "bluefs stats", + "bluestore allocator dump block", "bluestore bluefs device info", "config diff", "config show", diff --git a/sos/report/plugins/ceph_rgw.py b/sos/report/plugins/ceph_rgw.py index bc22616606..3a50c312cb 100644 --- a/sos/report/plugins/ceph_rgw.py +++ b/sos/report/plugins/ceph_rgw.py @@ -6,6 +6,9 @@ # # See the LICENSE file in the source distribution for further information. 
+import json
+
+from socket import gethostname
 from sos.report.plugins import Plugin, RedHatPlugin, UbuntuPlugin
@@ -21,6 +24,26 @@ class CephRGW(Plugin, RedHatPlugin, UbuntuPlugin):
 
     def setup(self):
         all_logs = self.get_option("all_logs")
+        cmds = ['bucket limit check',
+                'bucket list',
+                'bucket stats',
+                'datalog list',
+                'datalog status',
+                'gc list',
+                'lc list',
+                'log list',
+                'metadata sync status',
+                'period list',
+                'realm list',
+                'reshard list',
+                'sync error list',
+                'sync status',
+                'zone list',
+                'zone placement list',
+                'zonegroup list',
+                'zonegroup placement list',
+                ]
+
         microceph = self.policy.package_manager.pkg_by_name('microceph')
         if microceph:
             if all_logs:
@@ -57,4 +80,42 @@ def setup(self):
                 "/etc/ceph/*bindpass*"
             ])
 
+        # Get commands output for both Ceph and microCeph
+        rgw_id = "radosgw.gateway" if microceph else "rgw." + gethostname()
+        self.add_cmd_output([f"radosgw-admin --id={rgw_id} {c}" for c in cmds])
+
+        # Get all the zone data
+        res = self.collect_cmd_output(f'radosgw-admin --id={rgw_id} zone list')
+        if res['status'] == 0:
+            try:
+                _out = json.loads(res['output'])
+                zone_list = _out['zones']
+                for zone in zone_list:
+                    self.add_cmd_output(f'radosgw-admin --id={rgw_id} '
+                                        f'zone get --rgw-zone={zone}')
+            except ValueError as err:
+                self._log_error(f'Error while getting rgw '
+                                f'zone list: {err}')
+
+        # Get all the zonegroup data
+        res = self.collect_cmd_output(f'radosgw-admin --id={rgw_id} '
+                                      f'zonegroup list')
+        if res['status'] == 0:
+            try:
+                _out = json.loads(res['output'])
+                zonegroups = _out['zonegroups']
+                for zgroup in zonegroups:
+                    self.add_cmd_output(f'radosgw-admin --id={rgw_id} '
+                                        f'zonegroup get --rgw-zonegroup={zgroup}')
+            except ValueError as err:
+                self._log_error(f'Error while getting rgw '
+                                f'zonegroup list: {err}')
+
+    def postproc(self):
+        """ Obfuscate secondary zone access keys """
+
+        rsub = r'("access_key":|"secret_key":)\s.*'
+        self.do_cmd_output_sub("radosgw-admin", rsub, r'\1 "**********"')
+
 # vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/cgroups.py b/sos/report/plugins/cgroups.py
index 35075d83ca..e7930faa5c 100644
--- a/sos/report/plugins/cgroups.py
+++ b/sos/report/plugins/cgroups.py
@@ -26,7 +26,8 @@ def setup(self):
 
         self.add_copy_spec([
             "/proc/cgroups",
-            "/sys/fs/cgroup"
+            "/sys/fs/cgroup",
+            "/proc/[0-9]*/cgroup",
         ])
 
         self.add_cmd_output("systemd-cgls")
diff --git a/sos/report/plugins/charmed_postgresql.py b/sos/report/plugins/charmed_postgresql.py
index 6f20c6337b..d6cc16cbc2 100644
--- a/sos/report/plugins/charmed_postgresql.py
+++ b/sos/report/plugins/charmed_postgresql.py
@@ -84,7 +84,7 @@ def setup(self):
 
         # Read and parse patroni config, finish setup if there are errors
         try:
-            with open(PATRONI_CONFIG_FILE) as f:
+            with open(PATRONI_CONFIG_FILE, encoding='utf-8') as f:
                 patroni_config = yaml.safe_load(f)
                 self.patroni_cluster_name = patroni_config["scope"]
                 postgresql = patroni_config["postgresql"]
diff --git a/sos/report/plugins/container_log.py b/sos/report/plugins/container_log.py
index 98d9637ace..3a45f2b7cc 100644
--- a/sos/report/plugins/container_log.py
+++ b/sos/report/plugins/container_log.py
@@ -9,7 +9,7 @@
 # See the LICENSE file in the source distribution for further information.
import os -from sos.report.plugins import Plugin, IndependentPlugin +from sos.report.plugins import Plugin, IndependentPlugin, PluginOpt class ContainerLog(Plugin, IndependentPlugin): @@ -17,18 +17,29 @@ class ContainerLog(Plugin, IndependentPlugin): short_desc = 'All logs under /var/log/containers' plugin_name = 'container_log' logdir = '/var/log/containers/' + poddir = '/var/log/pods/' + rotated_dirs = [poddir + '*/*.log.*', poddir + '*/*/*.log.*'] files = (logdir, ) + option_list = [ + PluginOpt('rotated', default=False, val_type=bool, + desc='also get rotated logs from /var/log/pods'), + ] + def setup(self): if self.get_option('all_logs'): self.add_copy_spec(self.logdir) + if self.get_option('rotated'): + self.add_copy_spec(self.rotated_dirs) else: - self.collect_subdirs() + self.collect_subdirs(self.logdir, '*.log') + if self.get_option('rotated'): + self.collect_subdirs(self.poddir, '*.log.*') - def collect_subdirs(self, root=logdir): + def collect_subdirs(self, root, glob): """Collect *.log files from subdirs of passed root path """ for dir_name, _, _ in os.walk(root): - self.add_copy_spec(self.path_join(dir_name, '*.log')) + self.add_copy_spec(self.path_join(dir_name, glob)) # vim: set et ts=4 sw=4 : diff --git a/sos/report/plugins/containerd.py b/sos/report/plugins/containerd.py index ecba988721..a8122ed683 100644 --- a/sos/report/plugins/containerd.py +++ b/sos/report/plugins/containerd.py @@ -23,6 +23,7 @@ def setup(self): ]) self.add_cmd_output('containerd config dump') + self.add_cmd_output('ctr deprecations list') # collect the containerd logs. self.add_journal(units='containerd') diff --git a/sos/report/plugins/foreman.py b/sos/report/plugins/foreman.py index f3d3f4ad37..c18cac9ddf 100644 --- a/sos/report/plugins/foreman.py +++ b/sos/report/plugins/foreman.py @@ -297,7 +297,12 @@ def collect_proxies(self): timeout=10) # collect http[|s]_proxy env.variables - self.add_env_var(["http_proxy", "https_proxy"]) + self.add_env_var([ + 'HTTP_PROXY', + 'HTTPS_PROXY', + 'NO_PROXY', + 'ALL_PROXY', + ]) def build_query_cmd(self, query, csv=False, binary="psql"): """ diff --git a/sos/report/plugins/foreman_proxy.py b/sos/report/plugins/foreman_proxy.py index 5f684be259..e958e25946 100644 --- a/sos/report/plugins/foreman_proxy.py +++ b/sos/report/plugins/foreman_proxy.py @@ -42,7 +42,12 @@ def setup(self): ]) # collect http[|s]_proxy env.variables - self.add_env_var(["http_proxy", "https_proxy"]) + self.add_env_var([ + 'HTTP_PROXY', + 'HTTPS_PROXY', + 'NO_PROXY', + 'ALL_PROXY', + ]) def postproc(self): self.do_path_regex_sub( diff --git a/sos/report/plugins/grafana.py b/sos/report/plugins/grafana.py index 2b0ac44d8d..01819028d6 100644 --- a/sos/report/plugins/grafana.py +++ b/sos/report/plugins/grafana.py @@ -25,7 +25,7 @@ def setup(self): log_path = "/var/snap/grafana/common/data/log/" config_path = "/var/snap/grafana/current/conf/grafana.ini" - self.add_cmd_output("snap info grafana") + self.add_cmd_output("snap info grafana", snap_cmd=True) else: grafana_cli = "grafana-cli" log_path = "/var/log/grafana/" @@ -36,7 +36,7 @@ def setup(self): f'{grafana_cli} plugins list-remote', f'{grafana_cli} -v', 'grafana-server -v', - ]) + ], snap_cmd=self.is_snap) log_file_pattern = "*.log*" if self.get_option("all_logs") else "*.log" diff --git a/sos/report/plugins/kea.py b/sos/report/plugins/kea.py new file mode 100644 index 0000000000..8512d069e1 --- /dev/null +++ b/sos/report/plugins/kea.py @@ -0,0 +1,44 @@ +# Copyright (C) 2024 Red Hat, Inc., Jose Castillo + +# This file is part of the sos 
project: https://github.com/sosreport/sos
+#
+# This copyrighted material is made available to anyone wishing to use,
+# modify, copy, or redistribute it subject to the terms and conditions of
+# version 2 of the GNU General Public License.
+#
+# See the LICENSE file in the source distribution for further information.
+
+from sos.report.plugins import (Plugin, IndependentPlugin)
+
+
+class Kea(Plugin, IndependentPlugin):
+    """
+    Kea is the next generation of DHCP software, developed by Internet
+    Systems Consortium (ISC). It supports both the DHCPv4 and DHCPv6 protocols
+    along with their extensions, e.g. prefix delegation and dynamic updates to
+    DNS.
+    """
+    short_desc = 'Kea DHCP and DDNS server from ISC'
+
+    plugin_name = "kea"
+    packages = ("kea", "kea-common",)
+    services = ('kea-ctrl-agent', 'kea-dhcp-ddns-server',
+                'kea-dhcp4-server', 'kea-dhcp6-server',)
+
+    def setup(self):
+        self.add_copy_spec([
+            "/etc/kea/*",
+        ])
+        self.add_cmd_output([
+            "keactrl status",
+        ])
+
+    def postproc(self):
+        """ format is "password": "kea", """
+        self.do_path_regex_sub(
+            '/etc/kea/*',
+            r'(^\s*"password":\s*)(".*"),',
+            r'\1********'
+        )
+
+# vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/krb5.py b/sos/report/plugins/krb5.py
index 7612652b9f..4051eeadf7 100644
--- a/sos/report/plugins/krb5.py
+++ b/sos/report/plugins/krb5.py
@@ -8,6 +8,8 @@
 #
 # See the LICENSE file in the source distribution for further information.
 
+import re
+import socket
 from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, UbuntuPlugin
@@ -33,10 +35,38 @@ def setup(self):
             f"{self.kdcdir}/kdc.conf",
             "/var/log/kadmind.log"
         ])
+        self.collect_kinit()
         self.add_copy_spec("/var/log/krb5kdc.log", tags="kerberos_kdc_log")
         self.add_cmd_output(f"klist -ket {self.kdcdir}/.k5*")
         self.add_cmd_output("klist -ket /etc/krb5.keytab")
 
+    def collect_kinit(self):
+        """
+        Collect kinit command output for systems joined to domains with an
+        id_provider of "AD" or "IPA".
+
+        When a Linux machine is integrated with AD, realmd creates a
+        computer object on the AD side; realmd and AD restrict the
+        hostname/SPN to 15 characters.
+ """ + + hostname = socket.getfqdn() + sssd_conf = "/etc/sssd/sssd.conf" + if self.path_isfile(sssd_conf): + with open(sssd_conf, 'r', encoding='utf-8') as f: + for line in f: + if re.match(r'\s*id_provider\s*=\s*ad', + line, re.IGNORECASE): + hostname = hostname.split('.')[0][:15].upper() + self.add_cmd_output(f"KRB5_TRACE=/dev/stdout \ + kinit -k '{hostname}$'") + break + if re.match(r'\s*id_provider\s*=\s*ipa', + line, re.IGNORECASE): + self.add_cmd_output(f"KRB5_TRACE=/dev/stdout \ + kinit -k '{hostname}'") + break + class RedHatKrb5(Krb5, RedHatPlugin): diff --git a/sos/report/plugins/leapp.py b/sos/report/plugins/leapp.py index fd64a11537..379908ec81 100644 --- a/sos/report/plugins/leapp.py +++ b/sos/report/plugins/leapp.py @@ -16,10 +16,13 @@ class Leapp(Plugin, RedHatPlugin): short_desc = 'Leapp upgrade handling tool' plugin_name = 'leapp' - packages = ('leapp', 'leapp-repository') + packages = ('leapp',) + files = ('/var/lib/leapp',) def setup(self): self.add_copy_spec([ + '/etc/migration-results', + '/var/log/leapp/answerfile', '/var/log/leapp/dnf-debugdata/', '/var/log/leapp/leapp-preupgrade.log', '/var/log/leapp/leapp-upgrade.log', diff --git a/sos/report/plugins/lustre.py b/sos/report/plugins/lustre.py index 4f08b4aea8..44f87b128f 100644 --- a/sos/report/plugins/lustre.py +++ b/sos/report/plugins/lustre.py @@ -33,7 +33,7 @@ def setup(self): "lctl device_list", "lctl list_nids", "lctl route_list", - "lnetctl net show -v" + "lnetctl net show -v 4" ]) # Grab almost everything diff --git a/sos/report/plugins/lvm2.py b/sos/report/plugins/lvm2.py index 1bd3869953..0961c6fb24 100644 --- a/sos/report/plugins/lvm2.py +++ b/sos/report/plugins/lvm2.py @@ -23,7 +23,7 @@ class Lvm2(Plugin, IndependentPlugin): desc=('attempt to collect lvmdump with advanced options and ' 'raw metadata')), PluginOpt('metadata', default=False, - desc=('attempt to collect headers and metadata via pvck')) + desc='attempt to collect headers and metadata via pvck') ] def do_lvmdump(self, metadata=False): diff --git a/sos/report/plugins/lxd.py b/sos/report/plugins/lxd.py index 29f89dd487..f5c9c27495 100644 --- a/sos/report/plugins/lxd.py +++ b/sos/report/plugins/lxd.py @@ -26,7 +26,7 @@ def setup(self): lxd_pred = SoSPredicate(self, services=['snap.lxd.daemon'], required={'services': 'all'}) - self.add_cmd_output("lxd.buginfo", pred=lxd_pred) + self.add_cmd_output("lxd.buginfo", pred=lxd_pred, snap_cmd=True) self.add_copy_spec([ '/var/snap/lxd/common/config', diff --git a/sos/report/plugins/maas.py b/sos/report/plugins/maas.py index eb758d9742..d72abb05df 100644 --- a/sos/report/plugins/maas.py +++ b/sos/report/plugins/maas.py @@ -8,19 +8,29 @@ # # See the LICENSE file in the source distribution for further information. 
-from sos.report.plugins import Plugin, UbuntuPlugin, PluginOpt +import os +from sos.report.plugins import Plugin, UbuntuPlugin -class Maas(Plugin, UbuntuPlugin): +class MAAS(Plugin, UbuntuPlugin): - short_desc = 'Ubuntu Metal-As-A-Service' + short_desc = 'MAAS | Metal as a Service' plugin_name = 'maas' + plugin_timeout = 1800 profiles = ('sysmgmt',) - packages = ('maas', 'maas-common') - services = ( - # For the deb: + packages = ( + 'maas', + 'maas-region-api', + 'maas-region-controller', + 'maas-rack-controller', + 'maas-agent', + ) + + _services = ( + 'maas-agent', + 'maas-apiserver', 'maas-dhcpd', 'maas-dhcpd6', 'maas-http', @@ -28,104 +38,141 @@ class Maas(Plugin, UbuntuPlugin): 'maas-rackd', 'maas-regiond', 'maas-syslog', - # MAAS 3.5 deb: 'maas-temporal', - 'maas-apiserver', - 'maas-agent', - # For the pre-3.5 snap: + 'maas-temporal-worker', 'snap.maas.supervisor', - # MAAS 3.5 snap uses `snap.maas.pebble` service, but it's not - # included here to prevent automatic journald log collection. + 'snap.maas.pebble', ) - option_list = [ - PluginOpt('profile-name', default='', val_type=str, - desc='Name of the remote API'), - PluginOpt('url', default='', val_type=str, - desc='URL of the remote API'), - PluginOpt('credentials', default='', val_type=str, - desc='Credentials, or the API key') - ] - - def _has_login_options(self): - return self.get_option("url") and self.get_option("credentials") \ - and self.get_option("profile-name") - - def _remote_api_login(self): - ret = self.exec_cmd( - f"maas login {self.get_option('profile-name')} " - f"{self.get_option('url')} {self.get_option('credentials')}" - ) - - return ret['status'] == 0 - - def setup(self): - if self.is_snap: - self.add_cmd_output([ - 'snap info maas', - 'maas status' - ]) - - if self.is_service("snap.maas.pebble"): - # Because `snap.maas.pebble` is not in the services - # tuple to prevent timeouts caused by log collection, - # service status and logs are collected here. - self.add_service_status("snap.maas.pebble") - since = self.get_option("since") or "-1days" - self.add_journal(units="snap.maas.pebble", since=since) - - # Don't send secrets - self.add_forbidden_path([ - "/var/snap/maas/current/bind/session.key", - "/var/snap/maas/current/http/certs/regiond-proxy-key.pem", - ]) + def _get_machines_syslog(self, directory): + if not self.path_exists(directory): + return [] + + # Machine messages are collected with syslog and are stored under: + # $template "{{log_dir}}/rsyslog/%HOSTNAME%/%$YEAR%-%$MONTH%-%$DAY%" + # Collect only the most recent "%$YEAR%-%$MONTH%-%$DAY%" + # for each "%HOSTNAME%". 
+        recent = []
+        for host_dir in self.listdir(directory):
+            host_path = self.path_join(directory, host_dir)
+            if not self.path_isdir(host_path):
+                continue
+
+            subdirs = [
+                self.path_join(host_path, d)
+                for d in self.listdir(host_path)
+                if self.path_isdir(self.path_join(host_path, d))
+            ]
+
+            if not subdirs:
+                continue
+
+            sorted_subdirs = sorted(
+                subdirs, key=lambda d: os.stat(d).st_mtime, reverse=True
+            )
+
+            all_logs = self.get_option("all_logs")
+            since = self.get_option("since")
+
+            if not all_logs and not since:
+                recent.append(sorted_subdirs[0])
+            else:
+                since = since.timestamp() if since else 0
+                recent.extend(
+                    [d for d in sorted_subdirs if os.stat(d).st_mtime >= since]
+                )
+
+        return recent
+
+    def _snap_collect(self):
+        self.add_cmd_output([
+            'snap info maas',
+            'maas status',
+        ], snap_cmd=True)
+
+        self.add_forbidden_path([
+            "/var/snap/maas/**/*.key",
+            "/var/snap/maas/**/*.pem",
+            "/var/snap/maas/**/secret",
+        ])
+
+        self.add_copy_spec([
+            "/var/snap/maas/common/snap_mode",
+            "/var/snap/maas/common/log/**/*.log",
+            "/var/snap/maas/current/**/*.conf",
+            "/var/snap/maas/current/**/*.yaml",
+            "/var/snap/maas/current/bind",
+            "/var/snap/maas/current/preseeds",
+            "/var/snap/maas/current/supervisord/*.log",
+        ])
+
+        if self.get_option("all_logs"):
             self.add_copy_spec([
-                "/var/snap/maas/common/log",
-                "/var/snap/maas/common/snap_mode",
-                "/var/snap/maas/current/*.conf",
-                "/var/snap/maas/current/bind",
-                "/var/snap/maas/current/http",
-                "/var/snap/maas/current/supervisord",
-                "/var/snap/maas/current/preseeds",
-                "/var/snap/maas/current/proxy",
-                "/var/snap/maas/current/syslog",
+                "/var/snap/maas/common/log/**/*.log.*",
+                "/var/snap/maas/current/supervisord/*.log.*",
             ])
-        else:
+
+        self.add_copy_spec(
+            self._get_machines_syslog(
+                "/var/snap/maas/common/log/rsyslog"
+            )
+        )
+
+    def _deb_collect(self):
+        self.add_cmd_output([
+            "apt-cache policy maas maas-*",
+        ])
+
+        self.add_forbidden_path([
+            "/var/lib/maas/**/*.key",
+            "/var/lib/maas/**/*.pem",
+            "/var/lib/maas/**/secret",
+            "/etc/maas/**/*.key",
+            "/etc/maas/**/*.pem",
+            "/etc/maas/**/secret",
+        ])
+
+        self.add_copy_spec([
+            "/etc/maas/**/*.conf",
+            "/etc/maas/**/*.yaml",
+            "/etc/maas/preseeds",
+            "/var/lib/maas/**/*.conf",
+            "/var/lib/maas/dhcp/*.leases",
+            "/var/lib/maas/temporal",
+            "/var/log/maas/**/*.log",
+        ])
+
+        if self.get_option("all_logs"):
             self.add_copy_spec([
-                "/etc/squid-deb-proxy",
-                "/etc/maas",
-                "/var/lib/maas/dhcp*",
-                "/var/lib/maas/http/*.conf",
-                "/var/lib/maas/*.conf",
-                "/var/lib/maas/rsyslog",
-                "/var/log/maas*",
-                "/var/log/upstart/maas-*",
-            ])
-            self.add_cmd_output([
-                "apt-cache policy maas-*",
-                "apt-cache policy python-django-*",
+                "/var/log/maas/**/*.log.*",
             ])
 
-        if self.is_installed("maas-region-controller"):
-            self.add_cmd_output([
-                "maas-region dumpdata",
-            ])
+        self.add_copy_spec(
+            self._get_machines_syslog(
+                "/var/log/maas/rsyslog"
+            )
+        )
 
-        if self._has_login_options():
-            if self._remote_api_login():
-                self.add_cmd_output(f"maas {self.get_option('profile-name')} "
-                                    "commissioning-results list")
-            else:
-                self._log_error(
-                    "Cannot login into MAAS remote API with provided creds.")
+    def setup(self):
+        for service in self._services:
+            if self.is_service(service):
+                self.add_service_status(service)
+                if not self.get_option('all_logs'):
+                    since = self.get_option("since") or "-1days"
+                    self.add_journal(service, since=since)
+                else:
+                    self.add_journal(service)
 
-    def postproc(self):
         if self.is_snap:
-            regiond_path = "/var/snap/maas/current/maas/regiond.conf"
+            self._snap_collect()
         else:
-            regiond_path = 
"/etc/maas/regiond.conf" - self.do_file_sub(regiond_path, - r"(database_pass\s*:\s*)(.*)", - r"\1********") + self._deb_collect() + + def postproc(self): + self.do_path_regex_sub( + r"(.*)\.(conf|yaml|yml|toml)$", + r"((?:.*secret|.*password|.*pass)(?::\s*|=\s*))(.*)", + r"\1*****" + ) # vim: set et ts=4 sw=4 : diff --git a/sos/report/plugins/memory.py b/sos/report/plugins/memory.py index 00ef9f05c9..07a174ef52 100644 --- a/sos/report/plugins/memory.py +++ b/sos/report/plugins/memory.py @@ -27,7 +27,9 @@ def setup(self): "/proc/vmallocinfo", "/sys/kernel/mm/ksm", "/sys/kernel/mm/transparent_hugepage/enabled", - "/sys/kernel/mm/hugepages" + "/sys/kernel/mm/hugepages", + "/sys/kernel/mm/lru_gen/enabled", + "/sys/kernel/mm/lru_gen/min_ttl_ms", ]) self.add_cmd_output("free", root_symlink="free") self.add_cmd_output([ diff --git a/sos/report/plugins/microcloud.py b/sos/report/plugins/microcloud.py new file mode 100644 index 0000000000..2bd10b1747 --- /dev/null +++ b/sos/report/plugins/microcloud.py @@ -0,0 +1,44 @@ +# Copyright (C) 2024 Alan Baghumian + +# This file is part of the sos project: https://github.com/sosreport/sos +# +# This copyrighted material is made available to anyone wishing to use, +# modify, copy, or redistribute it subject to the terms and conditions of +# version 2 of the GNU General Public License. +# +# See the LICENSE file in the source distribution for further information. + +from sos.report.plugins import Plugin, UbuntuPlugin + + +class MicroCloud(Plugin, UbuntuPlugin): + """The MicroCloud plugin collects the current status of the microcloud + snap. + + It will collect journald logs as well as output from various microcloud + commands. + """ + + short_desc = 'MicroCloud Snap' + plugin_name = "microcloud" + profiles = ('container',) + + packages = ('microcloud',) + + def setup(self): + self.add_journal(units="snap.microcloud.*") + + microcloud_subcmds = [ + 'cluster list', + '--version' + ] + self.add_copy_spec([ + '/var/snap/microcloud/common/state/database/cluster.yaml', + '/var/snap/microcloud/common/state/database/info.yaml', + ]) + + self.add_cmd_output([ + f"microcloud {subcmd}" for subcmd in microcloud_subcmds + ]) + +# vim: set et ts=4 sw=4 diff --git a/sos/report/plugins/networking.py b/sos/report/plugins/networking.py index aaae907be7..d5b62b1c7c 100644 --- a/sos/report/plugins/networking.py +++ b/sos/report/plugins/networking.py @@ -109,6 +109,8 @@ def setup(self): "ip neigh show nud noarp", "biosdevname -d", "tc -s qdisc show", + "nmstatectl show", + "nmstatectl show --running-config", ]) if self.path_isdir('/sys/class/devlink'): @@ -116,6 +118,11 @@ def setup(self): "devlink dev param show", "devlink dev info", "devlink port show", + "devlink sb show", + "devlink sb pool show", + "devlink sb port pool show", + "devlink sb tc bind show", + "devlink -s -v trap show", "devlink health show", ]) @@ -123,7 +130,24 @@ def setup(self): if devlinks['status'] == 0: devlinks_list = devlinks['output'].splitlines() for devlink in devlinks_list: - self.add_cmd_output(f"devlink dev eswitch show {devlink}") + self.add_cmd_output([ + f"devlink dev eswitch show {devlink}", + f"devlink sb occupancy snapshot {devlink}", + f"devlink sb occupancy show {devlink}", + f"devlink -v resource show {devlink}" + ]) + dev_tables = [] + dpipe = self.collect_cmd_output( + f"devlink dpipe table show {devlink}" + ) + if dpipe['status'] == 0: + for tableln in dpipe['output'].splitlines(): + if tableln.startswith('name'): + dev_tables.append(tableln.split()[1]) + self.add_cmd_output([ + 
f"devlink dpipe table show {devlink} name {dname}" + for dname in dev_tables + ]) # below commands require some kernel module(s) to be loaded # run them only if the modules are loaded, or if explicitly requested @@ -226,17 +250,17 @@ def collect_ss_ip_ethtool_info(self): _subdir = f"namespaces/{namespace}" ns_cmd_prefix = cmd_prefix + namespace + " " self.add_cmd_output([ - ns_cmd_prefix + "ip -d address show", - ns_cmd_prefix + "ip route show table all", - ns_cmd_prefix + "ip -s -s neigh show", - ns_cmd_prefix + "ip -4 rule list", - ns_cmd_prefix + "ip -6 rule list", - ns_cmd_prefix + "ip vrf show", - ns_cmd_prefix + "sysctl -a", - ns_cmd_prefix + f"netstat {self.ns_wide} -neopa", - ns_cmd_prefix + "netstat -s", - ns_cmd_prefix + f"netstat {self.ns_wide} -agn", - ns_cmd_prefix + "nstat -zas", + f"{ns_cmd_prefix} ip -d address show", + f"{ns_cmd_prefix} ip route show table all", + f"{ns_cmd_prefix} ip -s -s neigh show", + f"{ns_cmd_prefix} ip -4 rule list", + f"{ns_cmd_prefix} ip -6 rule list", + f"{ns_cmd_prefix} ip vrf show", + f"{ns_cmd_prefix} sysctl -a", + f"{ns_cmd_prefix} netstat {self.ns_wide} -neopa", + f"{ns_cmd_prefix} netstat -s", + f"{ns_cmd_prefix} netstat {self.ns_wide} -agn", + f"{ns_cmd_prefix} nstat -zas", ], priority=50, subdir=_subdir) self.add_cmd_output([ns_cmd_prefix + "iptables-save"], pred=iptables_with_nft, @@ -259,10 +283,11 @@ def collect_ss_ip_ethtool_info(self): # Devices that exist in a namespace use less ethtool # parameters. Run this per namespace. self.add_device_cmd([ - ns_cmd_prefix + "ethtool %(dev)s", - ns_cmd_prefix + "ethtool -i %(dev)s", - ns_cmd_prefix + "ethtool -k %(dev)s", - ns_cmd_prefix + "ethtool -S %(dev)s" + f"{ns_cmd_prefix} ethtool %(dev)s", + f"{ns_cmd_prefix} ethtool -i %(dev)s", + f"{ns_cmd_prefix} ethtool -k %(dev)s", + f"{ns_cmd_prefix} ethtool -S %(dev)s", + f"{ns_cmd_prefix} ethtool -m %(dev)s" ], devices=_devs['ethernet'], priority=50, subdir=_subdir) self.add_command_tags() diff --git a/sos/report/plugins/nvidia.py b/sos/report/plugins/nvidia.py index 25d35e53a1..86e93ada43 100644 --- a/sos/report/plugins/nvidia.py +++ b/sos/report/plugins/nvidia.py @@ -16,9 +16,13 @@ class Nvidia(Plugin, IndependentPlugin): short_desc = 'Nvidia GPU information' plugin_name = 'nvidia' - commands = ('nvidia-smi',) + commands = ('nvidia-smi', 'nvidia-ctk',) + services = ('nvidia-persistenced', 'nvidia-fabricmanager', + 'nvidia-toolkit-firstboot') def setup(self): + self.add_copy_spec("/etc/cdi/nvidia.yaml") + subcmds = [ '--list-gpus', '-q -d PERFORMANCE', @@ -29,9 +33,12 @@ def setup(self): 'nvlink -s', 'nvlink -e' ] - - self.add_service_status("nvidia-persistenced") + ctk_subcmds = [ + 'cdi list', + '--version', + ] self.add_cmd_output([f"nvidia-smi {cmd}" for cmd in subcmds]) + self.add_cmd_output([f"nvidia-ctk {cmd}" for cmd in ctk_subcmds]) query = ('gpu_name,gpu_bus_id,vbios_version,temperature.gpu,' 'utilization.gpu,memory.total,memory.free,memory.used,' @@ -42,6 +49,5 @@ def setup(self): self.add_cmd_output( f"nvidia-smi --query-retired-pages={querypages} --format=csv" ) - self.add_journal(boot=0, identifier='nvidia-persistenced') # vim: set et ts=4 sw=4 : diff --git a/sos/report/plugins/omnipath_client.py b/sos/report/plugins/omnipath_client.py index be04c70ef0..772a10fe29 100644 --- a/sos/report/plugins/omnipath_client.py +++ b/sos/report/plugins/omnipath_client.py @@ -2,8 +2,7 @@ # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by -# the Free 
-# (at your option) any later version.
+# the Free Software Foundation; either version 2 of the License.
 
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/sos/report/plugins/omnipath_manager.py b/sos/report/plugins/omnipath_manager.py
index 7d206d0391..8177c9d558 100644
--- a/sos/report/plugins/omnipath_manager.py
+++ b/sos/report/plugins/omnipath_manager.py
@@ -2,8 +2,7 @@
 
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
+# the Free Software Foundation; either version 2 of the License.
 
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
diff --git a/sos/report/plugins/openstack_edpm.py b/sos/report/plugins/openstack_edpm.py
index ca95f110e7..0e642a294b 100644
--- a/sos/report/plugins/openstack_edpm.py
+++ b/sos/report/plugins/openstack_edpm.py
@@ -17,7 +17,7 @@ class OpenStackEDPM(Plugin, RedHatPlugin):
 
     plugin_name = 'openstack_edpm'
     profiles = ('openstack', 'openstack_edpm')
-    services = ('edpm-container-shutdown')
+    services = ('edpm-container-shutdown',)
     edpm_log_paths = []
 
     def setup(self):
diff --git a/sos/report/plugins/openstack_novajoin.py b/sos/report/plugins/openstack_novajoin.py
index bc73c9a5da..1f9063b145 100644
--- a/sos/report/plugins/openstack_novajoin.py
+++ b/sos/report/plugins/openstack_novajoin.py
@@ -25,7 +25,7 @@ def setup(self):
         self.add_copy_spec("/var/log/novajoin/*.log")
 
     def postproc(self):
-        regexp = (r"(password|memcache_secret_key)=(.*)")
+        regexp = r"(password|memcache_secret_key)=(.*)"
         self.do_file_sub("/etc/novajoin/join.conf",
                          regexp, r"\1=*********")
 
diff --git a/sos/report/plugins/ovn_central.py b/sos/report/plugins/ovn_central.py
index 6c09f18093..8fa017a453 100644
--- a/sos/report/plugins/ovn_central.py
+++ b/sos/report/plugins/ovn_central.py
@@ -90,8 +90,7 @@ def add_database_output(self, tables, ovn_cmd):
         """ Collect OVN database output """
         if tables:
             return [f"{ovn_cmd} list {table}" for table in tables]
-        else:
-            return None
+        return None
 
     def setup(self):
         # check if env is a clustered or non-clustered one
diff --git a/sos/report/plugins/pam.py b/sos/report/plugins/pam.py
index a9773ff1f2..fbe3d08b72 100644
--- a/sos/report/plugins/pam.py
+++ b/sos/report/plugins/pam.py
@@ -27,7 +27,8 @@ def setup(self):
 
         self.add_copy_spec([
             "/etc/pam.d",
-            "/etc/security"
+            "/etc/security",
+            "/etc/authselect/authselect.conf",
         ])
         self.add_cmd_output([
             "pam_tally2",
diff --git a/sos/report/plugins/process.py b/sos/report/plugins/process.py
index d478faf09c..b4a12ad193 100644
--- a/sos/report/plugins/process.py
+++ b/sos/report/plugins/process.py
@@ -39,7 +39,9 @@ def setup(self):
 
         self.add_copy_spec([
             "/proc/sched_debug",
-            "/proc/stat"
+            "/proc/stat",
+            "/sys/kernel/debug/sched/debug",
+            "/sys/kernel/debug/sched/features"
         ])
 
         procs = [p for p in self.listdir("/proc") if re.match("[0-9]", p)]
diff --git a/sos/report/plugins/processor.py b/sos/report/plugins/processor.py
index bb0de290a1..44f07dca34 100644
--- a/sos/report/plugins/processor.py
+++ b/sos/report/plugins/processor.py
@@ -66,9 +66,11 @@ def setup(self):
             "cpupower frequency-info",
             "cpupower info",
             "cpupower idle-info",
-            "turbostat --debug sleep 10",
         ], cmd_as_tag=True,
           pred=cpupower_pred)
 
+        self.add_cmd_output("turbostat --debug sleep 10", cmd_as_tag=True,
+                            pred=cpupower_pred, timeout=15)
+
         if '86' in self.policy.get_arch():
             self.add_cmd_output("x86info -a")
 
diff --git a/sos/report/plugins/pulpcore.py b/sos/report/plugins/pulpcore.py
index f3696cd595..753af5d5d6 100644
--- a/sos/report/plugins/pulpcore.py
+++ b/sos/report/plugins/pulpcore.py
@@ -105,8 +105,15 @@ def setup(self):
         task_days = self.get_option('task-days')
         for table in ['core_task', 'core_taskgroup',
                       'core_groupprogressreport', 'core_progressreport']:
-            _query = (f"select * from {table} where pulp_last_updated > NOW()"
-                      f" - interval '{task_days} days' order by"
+            _query = ("COPY (SELECT STRING_AGG(column_name, ', ') FROM "
+                      f"information_schema.columns WHERE table_name='{table}' "
+                      "AND table_schema = 'public' AND column_name NOT IN"
+                      " ('args', 'kwargs', 'enc_args', 'enc_kwargs'))"
+                      " TO STDOUT;")
+            col_out = self.exec_cmd(self.build_query_cmd(_query), env=self.env)
+            columns = col_out['output'] if col_out['status'] == 0 else '*'
+            _query = (f"select {columns} from {table} where pulp_last_updated"
+                      f" > NOW() - interval '{task_days} days' order by"
                       " pulp_last_updated")
             _cmd = self.build_query_cmd(_query)
             self.add_cmd_output(_cmd, env=self.env, suggest_filename=table)
diff --git a/sos/report/plugins/qpid.py b/sos/report/plugins/qpid.py
index 87b5315e1d..5c886c209a 100644
--- a/sos/report/plugins/qpid.py
+++ b/sos/report/plugins/qpid.py
@@ -37,7 +37,7 @@ def setup(self):
         for option in ["ssl-certificate", "ssl-key"]:
             if self.get_option(option):
                 amqps_prefix = "amqps://"
-                options = (options + f" --{option}={self.get_option(option)}")
+                options = options + f" --{option}={self.get_option(option)}"
         if self.get_option("port"):
             options = (options + " -b " + amqps_prefix +
                        f"localhost:{self.get_option('port')}")
diff --git a/sos/report/plugins/scsi.py b/sos/report/plugins/scsi.py
index 553f8b57d7..1680d20e0a 100644
--- a/sos/report/plugins/scsi.py
+++ b/sos/report/plugins/scsi.py
@@ -58,7 +58,8 @@ def setup(self):
             "lsscsi -H",
             "lsscsi -d",
             "lsscsi -s",
-            "lsscsi -L"
+            "lsscsi -L",
+            "lsscsi -iw",
         ])
 
         scsi_hosts = glob("/sys/class/scsi_host/*")
diff --git a/sos/report/plugins/shmcli.py b/sos/report/plugins/shmcli.py
index e0e2b18e1c..5255236ee6 100644
--- a/sos/report/plugins/shmcli.py
+++ b/sos/report/plugins/shmcli.py
@@ -91,7 +91,7 @@ def collect_enclosures_list(self):
                     _dcmd = (f"{self.shmcli_bin} getdebugcli "
                              f"-a={adapt_index} -enc={enc_index}")
                     _dname = _dcmd.replace(self.shmcli_bin, 'shmcli')
-                    _odir = (f" -outputdir={logpath}")
+                    _odir = f" -outputdir={logpath}"
                     self.add_cmd_output(
                         _dcmd + _odir, suggest_filename=_dname, timeout=300
diff --git a/sos/report/plugins/sudo.py b/sos/report/plugins/sudo.py
index 2c35fc292d..c4d302912b 100644
--- a/sos/report/plugins/sudo.py
+++ b/sos/report/plugins/sudo.py
@@ -21,6 +21,22 @@ class Sudo(Plugin, IndependentPlugin):
     def setup(self):
         self.add_copy_spec("/etc/sudo*")
 
+        config_file = "/etc/sudo.conf"
+        log_files = ['/var/log/sudo_debug', '/var/log/sudoers_debug']
+        try:
+            with open(config_file, 'r', encoding='UTF-8') as cfile:
+                for line in cfile:
+                    if line.startswith('Debug'):
+                        log_files.append(line.split()[2])
+        except IOError as error:
+            self._log_error(f'Could not open conf file {config_file}: '
+                            f'{error}')
+
+        if not self.get_option('all_logs'):
+            self.add_copy_spec(log_files)
+        else:
+            self.add_copy_spec([f"{log}*" for log in log_files])
+
     def postproc(self):
         regexp = r"(\s*bindpw\s*)\S+"
         self.do_file_sub("/etc/sudo-ldap.conf", regexp,
r"\1********") diff --git a/sos/report/plugins/sunbeam.py b/sos/report/plugins/sunbeam.py index 150f5b25a2..1854637a63 100644 --- a/sos/report/plugins/sunbeam.py +++ b/sos/report/plugins/sunbeam.py @@ -46,7 +46,7 @@ def setup(self): self.add_cmd_output([ 'sunbeam cluster list', 'sunbeam cluster list --format yaml', - ]) + ], snap_cmd=True) sunbeam_user = self.get_option("sunbeam-user") try: @@ -100,7 +100,7 @@ def setup(self): "login") def _get_juju_cmd_details(self, user): - self.add_cmd_output("juju controllers", runas=user) + self.add_cmd_output("juju controllers", runas=user, snap_cmd=True) juju_controllers = self.collect_cmd_output( "juju controllers --format json", runas=user) @@ -113,7 +113,7 @@ def _get_juju_cmd_details(self, user): f'juju model-defaults -c {controller}', f'juju controller-config -c {controller}', f'juju controller-config -c {controller} --format json', - ], runas=user) + ], runas=user, snap_cmd=True) juju_models = self.collect_cmd_output( f'juju models -c {controller} --format json', @@ -131,7 +131,7 @@ def _get_juju_cmd_details(self, user): f'juju status -m {model_name} --format json', f'juju model-config -m {model_name}', f'juju model-config -m {model_name} --format json', - ], runas=user) + ], runas=user, snap_cmd=True) def postproc(self): diff --git a/sos/report/plugins/sunbeam_hypervisor.py b/sos/report/plugins/sunbeam_hypervisor.py index 1b52fcf566..167a1ed149 100644 --- a/sos/report/plugins/sunbeam_hypervisor.py +++ b/sos/report/plugins/sunbeam_hypervisor.py @@ -66,15 +66,33 @@ def postproc(self): connection_keys = ["connection", "sql_connection"] self.do_path_regex_sub( - fr"{self.common_dir}/etc/(nova|neutron)/*", + fr"{self.common_dir}/etc/(nova|neutron|ceilometer)/*", fr'(^\s*({"|".join(protect_keys)})\s*=\s*)(.*)', r"\1*********" ) self.do_path_regex_sub( - fr"{self.common_dir}/etc/(nova|neutron)/*", + fr"{self.common_dir}/etc/(nova|neutron|ceilometer)/*", fr'(^\s*({"|".join(connection_keys)})\s*=\s*(.*)' r'://(\w*):)(.*)(@(.*))', r"\1*********\6" ) + # hooks.log + protect_hook_keys = [ + "password", + "ovn_metadata_proxy_shared_secret", + "cacert", + "cert", + "key", + "ovn_cacert", + "ovn_cert", + "ovn_key", + ] + + self.do_file_sub( + f'{self.common_dir}/hooks.log', + fr'(\'({"|".join(protect_hook_keys)})\'):\s?\'(.+?)\'', + r"\1: **********" + ) + # vim: et ts=4 sw=4 diff --git a/sos/report/plugins/system.py b/sos/report/plugins/system.py index cc282dc1bb..fcba116162 100644 --- a/sos/report/plugins/system.py +++ b/sos/report/plugins/system.py @@ -40,5 +40,11 @@ def setup(self): "ld.so --list-tunables" ]) + def postproc(self): + self.do_paths_http_sub([ + "/etc/sysconfig", + "/etc/default", + "/etc/environment", + ]) # vim: set et ts=4 sw=4 : diff --git a/sos/report/plugins/systemd.py b/sos/report/plugins/systemd.py index a50a155e36..b23b32febc 100644 --- a/sos/report/plugins/systemd.py +++ b/sos/report/plugins/systemd.py @@ -95,4 +95,11 @@ def setup(self): ]) self.add_forbidden_path('/dev/null') + def postproc(self): + self.do_paths_http_sub([ + "/etc/systemd/system", + "/lib/systemd/system", + "/run/systemd/system", + ]) + # vim: set et ts=4 sw=4 : diff --git a/sos/report/plugins/vdo.py b/sos/report/plugins/vdo.py index f98af39f93..161d6a634e 100644 --- a/sos/report/plugins/vdo.py +++ b/sos/report/plugins/vdo.py @@ -28,6 +28,32 @@ def setup(self): vdos = self.collect_cmd_output('vdo list --all') for vdo in vdos['output'].splitlines(): self.add_cmd_output(f"vdo status -n {vdo}") - self.add_cmd_output('vdostats --human-readable') + 
self.add_cmd_output([ + 'vdostats --human-readable', + 'vdostats --verbose', + ]) + vdo_cols1 = ('vdo_slab_size,vdo_header_size,vdo_minimum_io_size,' + 'vdo_block_map_cache_size,vdo_block_map_era_length,' + 'vdo_write_policy,vdo_max_discard') + + vdo_cols2 = ('vdo_ack_threads,vdo_bio_rotation,vdo_bio_threads,' + 'vdo_cpu_threads,vdo_hash_zone_threads,' + 'vdo_logical_threads,vdo_physical_threads') + vdo_cols3 = ('vdo_compression,vdo_deduplication,' + 'vdo_use_metadata_hints,vdo_use_sparse_index,' + 'vdo_index_state,vdo_index_memory_size') + self.add_cmd_output([f"lvs -a -o +{cols}" + for cols in [vdo_cols1, vdo_cols2]]) + lvm_vdos = self.collect_cmd_output(f"lvs -a -o +{vdo_cols3}") + if lvm_vdos['status'] == 0: + for vdo in lvm_vdos['output'].splitlines(): + # we can find the pool and pool data maps in the output + # of lvs, in the column Volume type, marked as 'D' + lv, vg, lv_attr = vdo.split()[:3] + if lv_attr.startswith("D"): + vdo_path = f"{vg}-{lv.strip('[]')}" + self.add_cmd_output( + f"vdodumpconfig /dev/mapper/{vdo_path}" + ) # vim set et ts=4 sw=4 : diff --git a/sos/report/plugins/vmware.py b/sos/report/plugins/vmware.py index a89f3f7a27..1ca1857d50 100644 --- a/sos/report/plugins/vmware.py +++ b/sos/report/plugins/vmware.py @@ -6,10 +6,10 @@ # # See the LICENSE file in the source distribution for further information. -from sos.report.plugins import Plugin, RedHatPlugin +from sos.report.plugins import Plugin, IndependentPlugin -class VMWare(Plugin, RedHatPlugin): +class VMWare(Plugin, IndependentPlugin): short_desc = 'VMWare client information' diff --git a/sos/report/reporting.py b/sos/report/reporting.py index 5adba5920e..d87f8d35bd 100644 --- a/sos/report/reporting.py +++ b/sos/report/reporting.py @@ -19,20 +19,20 @@ import simplejson as json -class Node(object): +class Node: data = {} def __str__(self): return json.dumps(self.data) + # pylint: disable=unused-argument def can_add(self, node): return False class Leaf(Node): """Marker class that can be added to a Section node""" - pass class Report(Node): @@ -127,7 +127,7 @@ def ends_bs(string): return string.endswith('\\') -class PlainTextReport(object): +class PlainTextReport: """Will generate a plain text report from a top_level Report object""" HEADER = "" @@ -159,7 +159,7 @@ def __init__(self, report_node): def unicode(self): self.line_buf = line_buf = [] - if (len(self.HEADER) > 0): + if len(self.HEADER) > 0: line_buf.append(self.HEADER) # generate section/plugin list, split long list to multiple lines @@ -182,12 +182,12 @@ def unicode(self): self.process_subsection(section_contents, type_.ADDS_TO, header, format_, footer) - if (len(self.FOOTER) > 0): + if len(self.FOOTER) > 0: line_buf.append(self.FOOTER) - output = u'\n'.join(map(lambda i: (i if isinstance(i, str) - else i.decode('utf8', 'ignore')), - line_buf)) + output = '\n'.join(map(lambda i: (i if isinstance(i, str) + else i.decode('utf8', 'ignore')), + line_buf)) return output def process_subsection(self, section, key, header, format_, footer): @@ -198,7 +198,7 @@ def process_subsection(self, section, key, header, format_, footer): key=lambda x: x["name"] if isinstance(x, dict) else '' ): self.line_buf.append(format_ % item) - if (len(footer) > 0): + if len(footer) > 0: self.line_buf.append(footer) diff --git a/sos/utilities.py b/sos/utilities.py index a69536b309..e666c64972 100644 --- a/sos/utilities.py +++ b/sos/utilities.py @@ -28,6 +28,8 @@ except ImportError: from pkg_resources import parse_version +log = logging.getLogger('sos') + # try loading magic>=0.4.20 
which implements detect_from_filename method magic_mod = False try: @@ -35,14 +37,13 @@ magic.detect_from_filename(__file__) magic_mod = True except (ImportError, AttributeError): - log = logging.getLogger('sos') from textwrap import fill - msg = ("""\ + msg = """\ WARNING: Failed to load 'magic' module version >= 0.4.20 which sos aims to \ use for detecting binary files. A less effective method will be used. It is \ recommended to install proper python3-magic package with the module. -""") - log.warning('\n' + fill(msg, 72, replace_whitespace=False) + '\n') +""" + log.warning(f'\n{fill(msg, 72, replace_whitespace=False)}\n') TIMEOUT_DEFAULT = 300 @@ -112,28 +113,21 @@ def fileobj(path_or_file, mode='r'): """Returns a file-like object that can be used as a context manager""" if isinstance(path_or_file, str): try: - return open(path_or_file, mode) + return open(path_or_file, mode, encoding='utf-8') except IOError: - log = logging.getLogger('sos') log.debug(f"fileobj: {path_or_file} could not be opened") return closing(io.StringIO()) else: return closing(path_or_file) -def convert_bytes(bytes_, K=1 << 10, M=1 << 20, G=1 << 30, T=1 << 40): +def convert_bytes(num_bytes): """Converts a number of bytes to a shorter, more human friendly format""" - fn = float(bytes_) - if bytes_ >= T: - return f'{(fn / T):.1fT}' - elif bytes_ >= G: - return f'{(fn / G):.1fG}' - elif bytes_ >= M: - return f'{(fn / M):.1fM}' - elif bytes_ >= K: - return f'{(fn / K):.1fK}' - else: - return f'{bytes_}' + sizes = {'T': 1 << 40, 'G': 1 << 30, 'M': 1 << 20, 'K': 1 << 10} + for symbol, size in sizes.items(): + if num_bytes >= size: + return f"{float(num_bytes) / size:.1f}{symbol}" + return f"{num_bytes}" def file_is_binary(fname): @@ -160,7 +154,7 @@ def file_is_binary(fname): pass # if for some reason the above check fails or magic>=0.4.20 is not present, # fail over to checking the very first byte of the file content - with open(fname, 'tr') as tfile: + with open(fname, 'tr', encoding='utf-8') as tfile: try: # when opened as above (tr), reading binary content will raise # an exception @@ -235,7 +229,7 @@ def _child_prep_fn(): os.setgid(pwd.getpwnam(runas).pw_gid) os.setuid(pwd.getpwnam(runas).pw_uid) os.chdir(pwd.getpwnam(runas).pw_dir) - if (chdir): + if chdir: os.chdir(chdir) def _check_poller(proc): @@ -281,63 +275,61 @@ def _check_poller(proc): else: expanded_args.append(arg) if to_file: - _output = open(to_file, 'w') + # pylint: disable=consider-using-with + _output = open(to_file, 'w', encoding='utf-8') else: _output = PIPE try: - p = Popen(expanded_args, shell=False, stdout=_output, - stderr=STDOUT if stderr else PIPE, - bufsize=-1, env=cmd_env, close_fds=True, - preexec_fn=_child_prep_fn) + with Popen(expanded_args, shell=False, stdout=_output, + stderr=STDOUT if stderr else PIPE, + bufsize=-1, env=cmd_env, close_fds=True, + preexec_fn=_child_prep_fn) as p: - if not to_file: - reader = AsyncReader(p.stdout, sizelimit, binary) - else: - reader = FakeReader(p, binary) - - if poller: - while reader.running: - _check_poller(p) - else: - try: - # override timeout=0 to timeout=None, as Popen will treat the - # former as a literal 0-second timeout - p.wait(timeout if timeout else None) - except Exception: - p.terminate() - if to_file: - _output.close() - # until we separate timeouts from the `timeout` command - # handle per-cmd timeouts via Plugin status checks - reader.running = False - return {'status': 124, 'output': reader.get_contents(), - 'truncated': reader.is_full} - if to_file: - _output.close() + if not 
to_file: + reader = AsyncReader(p.stdout, sizelimit, binary) + else: + reader = FakeReader(p, binary) - # wait for Popen to set the returncode - while p.poll() is None: - pass + if poller: + while reader.running: + _check_poller(p) + else: + try: + # override timeout=0 to timeout=None, as Popen will treat + # the former as a literal 0-second timeout + p.wait(timeout if timeout else None) + except Exception: + p.terminate() + if to_file: + _output.close() + # until we separate timeouts from the `timeout` command + # handle per-cmd timeouts via Plugin status checks + reader.running = False + return {'status': 124, 'output': reader.get_contents(), + 'truncated': reader.is_full} + if to_file: + _output.close() + + # wait for Popen to set the returncode + while p.poll() is None: + pass - stdout = reader.get_contents() - truncated = reader.is_full + if p.returncode in (126, 127): + stdout = b"" + else: + stdout = reader.get_contents() + return { + 'status': p.returncode, + 'output': stdout, + 'truncated': reader.is_full + } except OSError as e: if to_file: _output.close() if e.errno == errno.ENOENT: return {'status': 127, 'output': "", 'truncated': ''} - else: - raise e - - if p.returncode == 126 or p.returncode == 127: - stdout = b"" - - return { - 'status': p.returncode, - 'output': stdout, - 'truncated': truncated - } + raise e def import_module(module_fqname, superclasses=None): @@ -554,8 +546,7 @@ def get_contents(self): time.sleep(0.01) if not self.binary: return ''.join(ln.decode('utf-8', 'ignore') for ln in self.deque) - else: - return b''.join(ln for ln in self.deque) + return b''.join(ln for ln in self.deque) @property def is_full(self): @@ -565,7 +556,7 @@ def is_full(self): return len(self.deque) == self.slots -class ImporterHelper(object): +class ImporterHelper: """Provides a list of modules that can be imported in a package. Importable modules are located along the module __path__ list and modules are files that end in .py. @@ -580,7 +571,7 @@ def __init__(self, package): def _plugin_name(self, path): "Returns the plugin module name given the path" base = os.path.basename(path) - name, ext = os.path.splitext(base) + name, _ = os.path.splitext(base) return name def _get_plugins_from_list(self, list_): diff --git a/tests/cleaner_tests/basic_function_tests/report_with_mask.py b/tests/cleaner_tests/basic_function_tests/report_with_mask.py index 919ddaa1e4..d98e54a500 100644 --- a/tests/cleaner_tests/basic_function_tests/report_with_mask.py +++ b/tests/cleaner_tests/basic_function_tests/report_with_mask.py @@ -6,10 +6,10 @@ # # See the LICENSE file in the source distribution for further information. 
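For reference, a hedged sketch of how a caller consumes the result contract kept by the sos_get_command_output() rewrite above (run_quietly is an illustrative wrapper, not sos API):

from sos.utilities import sos_get_command_output


def run_quietly(cmd, timeout=30):
    res = sos_get_command_output(cmd, timeout=timeout)
    if res['status'] == 124:
        # 124 marks a command terminated on timeout; partial output may
        # still be present, with 'truncated' flagging a full buffer
        return res['output']
    if res['status'] in (126, 127):
        # not executable / not found: 'output' is deliberately emptied
        return ''
    return res['output']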
-from sos_tests import StageOneReportTest, StageTwoReportTest
-
-import re
 from os import stat
+import re
+
+from sos_tests import StageOneReportTest, StageTwoReportTest
 
 
 class ReportWithMask(StageOneReportTest):
@@ -25,8 +25,8 @@ def pre_sos_setup(self):
         # obfuscate a random word from /etc/hosts and ensure the updated
         # sanitised file has same permissions (a+r)
         try:
-            self.hosts_obfuscated = open(
-                '/etc/hosts').read().strip('#\n').split()[-1]
+            with open('/etc/hosts', encoding='utf-8') as fp:
+                self.hosts_obfuscated = fp.read().strip('#\n').split()[-1]
         except (FileNotFoundError, IndexError) as e:
             self.warning(f"Unable to process /etc/hosts: {e}")
         if self.hosts_obfuscated:
diff --git a/tests/cleaner_tests/existing_archive.py b/tests/cleaner_tests/existing_archive.py
index 57bae713ee..492fea3d58 100644
--- a/tests/cleaner_tests/existing_archive.py
+++ b/tests/cleaner_tests/existing_archive.py
@@ -23,7 +23,7 @@ class ExistingArchiveCleanTest(StageTwoReportTest):
     :avocado: tags=stagetwo
     """
 
-    sos_cmd = '-v tests/test_data/%s.tar.xz' % ARCHIVE
+    sos_cmd = f'tests/test_data/{ARCHIVE}.tar.xz'
     sos_component = 'clean'
 
     def test_obfuscation_log_created(self):
@@ -34,9 +34,9 @@ def test_archive_type_correct(self):
         with open(os.path.join(
                 self.tmpdir,
-                f'{ARCHIVE}-obfuscation.log'), 'r') as log:
+                f'{ARCHIVE}-obfuscation.log'), 'r', encoding='utf-8') as log:
             for line in log:
-                if "Loaded %s" % ARCHIVE in line:
+                if f"Loaded {ARCHIVE}" in line:
                     assert \
                         'as type sos report archive' in line, \
                         f"Incorrect archive type detected: {line}"
@@ -45,7 +45,7 @@ def test_from_cmdline_logged(self):
         with open(os.path.join(
                 self.tmpdir,
-                f'{ARCHIVE}-obfuscation.log'), 'r') as log:
+                f'{ARCHIVE}-obfuscation.log'), 'r', encoding='utf-8') as log:
             for line in log:
                 if 'From cmdline' in line:
                     assert \
@@ -56,7 +56,7 @@ def test_extraction_completed_successfully(self):
         with open(os.path.join(
                 self.tmpdir,
-                f'{ARCHIVE}-obfuscation.log'), 'r') as log:
+                f'{ARCHIVE}-obfuscation.log'), 'r', encoding='utf-8') as log:
             for line in log:
                 if 'Extracted path is' in line:
                     path = line.split('Extracted path is')[-1].strip()
@@ -92,12 +92,12 @@ def test_no_empty_obfuscations(self):
         map_file = re.findall(
             '/.*sosreport-.*-private_map', self.cmd_output.stdout
         )[-1]
-        with open(map_file, 'r') as mf:
+        with open(map_file, 'r', encoding='utf-8') as mf:
             map_json = json.load(mf)
             for mapping in map_json:
                 for key, val in map_json[mapping].items():
-                    assert key, "Empty key found in %s" % mapping
-                    assert val, "%s mapping for '%s' empty" % (mapping, key)
+                    assert key, f"Empty key found in {mapping}"
+                    assert val, f"{mapping} mapping for '{key}' empty"
 
     def test_ip_not_in_any_file(self):
         content = self.grep_for_content('10.0.0.15')
@@ -124,7 +124,7 @@ class ExistingArchiveCleanTmpTest(StageTwoReportTest):
     :avocado: tags=stagetwo
     """
 
-    sos_cmd = f'-v --keywords var,tmp,avocado --disable-parsers \
+    sos_cmd = f'--keywords var,tmp,avocado --disable-parsers \
 ip,ipv6,mac,username --no-update tests/test_data/{ARCHIVE}.tar.xz'
     sos_component = 'clean'
 
diff --git a/tests/cleaner_tests/full_report/full_report_run.py b/tests/cleaner_tests/full_report/full_report_run.py
index b7ce07424b..a00d9cea8d 100644
--- a/tests/cleaner_tests/full_report/full_report_run.py
+++ b/tests/cleaner_tests/full_report/full_report_run.py
@@ -20,7 +20,7 @@ class FullCleanTest(StageTwoReportTest):
     :avocado: tags=stagetwo
     """
 
-    sos_cmd = '-v --clean'
+    sos_cmd = '--clean'
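+    # --clean chains the obfuscation pass onto the freshly generated
+    # report, which is what the map-file assertions below exercise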
sos_timeout = 600 # replace with an empty placeholder, make sure that this test case is not # influenced by previous clean runs @@ -38,10 +38,10 @@ def pre_sos_setup(self): short = host.split('.')[0] sosfd = journal.stream('sos-testing') sosfd.write( - "This is a test line from sos clean testing. The hostname %s " - "should not appear, nor should %s in an obfuscated archive. The " - "shortnames of %s and %s should also not appear." - % (host.lower(), host.upper(), short.lower(), short.upper()) + f"This is a test line from sos clean testing. The hostname " + f"{host.lower()} should not appear, nor should {host.upper()} " + f"in an obfuscated archive. The shortnames of {short.lower()} " + f"and {short.upper()} should also not appear." ) def test_private_map_was_generated(self): @@ -77,12 +77,12 @@ def test_no_empty_obfuscations(self): '/.*sosreport-.*-private_map', self.cmd_output.stdout )[-1] - with open(map_file, 'r') as mf: + with open(map_file, 'r', encoding='utf-8') as mf: map_json = json.load(mf) for mapping in map_json: for key, val in map_json[mapping].items(): - assert key, "Empty key found in %s" % mapping - assert val, "%s mapping for '%s' empty" % (mapping, key) + assert key, f"Empty key found in {mapping}" + assert val, f"{mapping} mapping for '{key}' empty" def test_ip_not_in_any_file(self): ip = self.sysinfo['pre']['networking']['ip_addr'] diff --git a/tests/cleaner_tests/ipv6_test/ipv6_test.py b/tests/cleaner_tests/ipv6_test/ipv6_test.py index 1334d92d81..e0a1fef60e 100644 --- a/tests/cleaner_tests/ipv6_test/ipv6_test.py +++ b/tests/cleaner_tests/ipv6_test/ipv6_test.py @@ -20,7 +20,7 @@ class IPv6Test(StageTwoReportTest): """ install_plugins = ['ipv6'] - sos_cmd = '-v --clean -o ipv6' + sos_cmd = '--clean -o ipv6' sos_timeout = 600 # replace default mapping to avoid being influenced by previous runs # place mock file with crafted address used by mocked plugin diff --git a/tests/cleaner_tests/report_disabled_parsers.py b/tests/cleaner_tests/report_disabled_parsers.py index c313485a19..9ce06b4867 100644 --- a/tests/cleaner_tests/report_disabled_parsers.py +++ b/tests/cleaner_tests/report_disabled_parsers.py @@ -59,7 +59,7 @@ class NativeCleanDisabledParsersTest(StageTwoReportTest): :avocado: tags=stagetwo """ - sos_cmd = "--disable-parsers=hostname tests/test_data/%s" % ARCHIVE + sos_cmd = f"--disable-parsers=hostname tests/test_data/{ARCHIVE}" sos_component = 'clean' def test_localhost_not_obfuscated(self): diff --git a/tests/report_tests/basic_report_tests.py b/tests/report_tests/basic_report_tests.py index 6489670e9c..5f722e13bc 100644 --- a/tests/report_tests/basic_report_tests.py +++ b/tests/report_tests/basic_report_tests.py @@ -48,9 +48,9 @@ def test_tag_summary_created(self): ) def test_dir_listing_works(self): - self.assertFileCollected('sos_commands/boot/ls_-alhZR_.boot') - boot_ls = self.get_name_in_archive('sos_commands/boot/ls_-alhZR_.boot') - with open(boot_ls, 'r') as ls_file: + self.assertFileCollected('sos_commands/boot/ls_-alZR_.boot') + boot_ls = self.get_name_in_archive('sos_commands/boot/ls_-alZR_.boot') + with open(boot_ls, 'r', encoding='utf-8') as ls_file: # make sure we actually got ls output ln = ls_file.readline().strip() self.assertEqual( diff --git a/tests/report_tests/options_tests/options_tests.py b/tests/report_tests/options_tests/options_tests.py index 15f0b57248..013283b6c7 100644 --- a/tests/report_tests/options_tests/options_tests.py +++ b/tests/report_tests/options_tests/options_tests.py @@ -17,7 +17,7 @@ class 
OptionsFromConfigTest(StageTwoReportTest): """ files = [('options_tests_sos.conf', '/etc/sos/sos.conf')] - sos_cmd = '-v ' + sos_cmd = '' def test_case_id_from_config(self): self.assertTrue('8675309' in self.archive) diff --git a/tests/report_tests/plugin_tests/logs.py b/tests/report_tests/plugin_tests/logs.py index ed799a63c7..7fbe9a5255 100644 --- a/tests/report_tests/plugin_tests/logs.py +++ b/tests/report_tests/plugin_tests/logs.py @@ -61,7 +61,7 @@ def pre_sos_setup(self): # allowing for any compression or de-dupe systemd does sosfd = journal.stream('sos-testing') rsize = 10 * 1048576 - for i in range(2): + for _ in range(2): # generate 10MB, write it, then write it in reverse. # Spend less time generating new strings rand = ''.join( diff --git a/tests/sos_tests.py b/tests/sos_tests.py index a2cca0d9c9..6ef91517e2 100644 --- a/tests/sos_tests.py +++ b/tests/sos_tests.py @@ -36,6 +36,7 @@ def skipIf(cond, message=None): + # pylint: disable=unused-argument def decorator(function): def wrapper(self, *args, **kwargs): if callable(cond): @@ -47,6 +48,7 @@ def wrapper(self, *args, **kwargs): return decorator +# pylint: disable=unused-argument def redhat_only(tst): def wrapper(func): if distro.detect().name not in RH_DIST: @@ -54,6 +56,7 @@ def wrapper(func): return wrapper +# pylint: disable=unused-argument def ubuntu_only(tst): def wrapper(func): if distro.detect().name not in UBUNTU_DIST: @@ -156,12 +159,12 @@ def _execute_sos_cmd(self): # entire test suite, which will become very difficult to read # don't flood w/ super verbose logs - LOG_UI.error('ERROR:\n' + msg[:8196]) + LOG_UI.error(f'ERROR:\n{msg[:8196]}') if err.result.interrupted: - raise Exception("Timeout exceeded, see output above") - else: - raise Exception("Command failed, see output above: " - f"'{err.command.split('bin/')[1]}'") + raise Exception("Timeout exceeded, see output " + "above") from err + raise Exception("Command failed, see output above: " + f"'{err.command.split('bin/')[1]}'") from err with open(os.path.join(self.tmpdir, 'output'), 'wb') as pfile: pickle.dump(self.cmd_output, pfile) self.cmd_output.stdout = self.cmd_output.stdout.decode() @@ -177,13 +180,13 @@ def _write_file_to_tmpdir(self, fname, content): fname = os.path.join(self.tmpdir, fname) if isinstance(content, bytes): content = content.decode() - with open(fname, 'w') as wfile: + with open(fname, 'w', encoding='utf-8') as wfile: wfile.write(content) def read_file_from_tmpdir(self, fname): fname = os.path.join(self.tmpdir, fname) try: - with open(fname, 'r') as tfile: + with open(fname, 'r', encoding='utf-8') as tfile: return tfile.read() except Exception: pass @@ -317,20 +320,17 @@ def post_test_tear_down(self): """Called at the end of a test run to ensure that any needed per-test cleanup can be done. """ - pass def setup_mocking(self): """Since we need to use setUp() in our overrides of avocado.Test, provide an alternate method for test cases that subclass BaseSoSTest to use. 
""" - pass def pre_sos_setup(self): """Do any needed non-mocking setup prior to the sos execution that is called in setUp() """ - pass def assertFileExists(self, fname): """Asserts that fname exists on the filesystem""" @@ -387,17 +387,17 @@ def manifest(self): self._manifest = json.loads(content) except Exception: self._manifest = '' - self.warning('Could not load manifest for test') + self.log.warn('Could not load manifest for test') return self._manifest @property def encrypted_path(self): return self.get_encrypted_path() - def _decrypt_archive(self, archive): - _archive = archive.strip('.gpg') + def _decrypt_archive(self, archive_arg): + _archive = archive_arg.strip('.gpg') cmd = (f"gpg --batch --passphrase {self.encrypt_pass} -o {_archive} " - f"--decrypt {archive}") + f"--decrypt {archive_arg}") try: process.run(cmd, timeout=10) except Exception as err: @@ -430,11 +430,10 @@ def grep_for_content(self, search, regexp=False): # grep will return an exit code of 1 if no matches are found, # which is what we want return False - else: - flist = [] - for ln in out.stdout.decode('utf-8').splitlines(): - flist.append(ln.split(self.tmpdir)[-1]) - return flist + flist = [] + for ln in out.stdout.decode('utf-8').splitlines(): + flist.append(ln.split(self.tmpdir)[-1]) + return flist def get_encrypted_path(self): """Since avocado re-instantiates a new object for every test_ method, @@ -510,7 +509,8 @@ def get_file_content(self, fname): :rtype: ``str`` """ content = '' - with open(self.get_name_in_archive(fname), 'r') as gfile: + with open(self.get_name_in_archive(fname), 'r', + encoding='utf-8') as gfile: content = gfile.read() return content @@ -580,7 +580,7 @@ def assertFileHasContent(self, fname, content): matched = False fname = self.get_name_in_archive(fname) self.assertFileExists(fname) - with open(fname, 'r') as lfile: + with open(fname, 'r', encoding='utf-8') as lfile: _contents = lfile.read() for line in _contents.splitlines(): if re.match(f".*{content}.*", line, re.I): @@ -601,7 +601,7 @@ def assertFileNotHasContent(self, fname, content): """ matched = False fname = self.get_name_in_archive(fname) - with open(fname, 'r') as mfile: + with open(fname, 'r', encoding='utf-8') as mfile: for line in mfile.read().splitlines(): if re.match(f".*{content}.*", line, re.I): matched = True @@ -822,9 +822,9 @@ def setUp(self): self.end_of_test_case = False self.sm = software_manager.manager.SoftwareManager() - for dist in self.packages: - if isinstance(self.packages[dist], str): - self.packages[dist] = [self.packages[dist]] + for dist, value in self.packages.items(): + if isinstance(value, str): + self.packages[dist] = [value] keys = self.packages.keys() # allow for single declaration of packages for the RH family diff --git a/tests/unittests/archive_tests.py b/tests/unittests/archive_tests.py index d3d1809d7a..fbc1fe6d38 100644 --- a/tests/unittests/archive_tests.py +++ b/tests/unittests/archive_tests.py @@ -27,9 +27,8 @@ def tearDown(self): shutil.rmtree(self.tmpdir) def check_for_file(self, filename): - rtf = tarfile.open(os.path.join(self.tmpdir, 'test.tar.xz')) - rtf.getmember(filename) - rtf.close() + with tarfile.open(os.path.join(self.tmpdir, 'test.tar.xz')) as rtf: + rtf.getmember(filename) def test_create(self): self.tf.finalize('auto') @@ -51,10 +50,10 @@ def test_add_node_dev_null(self): # when the string comes from tail() output def test_add_string_from_file(self): self.copy_strings = [] - testfile = tempfile.NamedTemporaryFile(dir=self.tmpdir, delete=False) - testfile.write(b"*" * 1000) 
- testfile.flush() - testfile.close() + with tempfile.NamedTemporaryFile(dir=self.tmpdir, delete=False) \ + as testfile: + testfile.write(b"*" * 1000) + testfile.flush() self.copy_strings.append((tail(testfile.name, 100), 'string_test.txt')) self.tf.add_string(self.copy_strings[0][0], 'tests/string_test.txt') diff --git a/tests/unittests/juju/juju_cluster_tests.py b/tests/unittests/juju/juju_cluster_tests.py index 1c8054f69d..216e363862 100644 --- a/tests/unittests/juju/juju_cluster_tests.py +++ b/tests/unittests/juju/juju_cluster_tests.py @@ -23,8 +23,9 @@ def __init__(self): def get_juju_output(model): - dir = pathlib.Path(__file__).parent.resolve() - with open(dir / "data" / f"juju_output_{model}.json") as f: + _dir = pathlib.Path(__file__).parent.resolve() + with open(_dir / "data" / f"juju_output_{model}.json", + encoding='utf-8') as f: return f.read() @@ -59,6 +60,7 @@ class JujuTest(unittest.TestCase): "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) + # pylint: disable=unused-argument def test_get_nodes_no_filter(self, mock_exec_primary_cmd): """No filter.""" mock_opts = MockOptions() @@ -69,7 +71,7 @@ def test_get_nodes_no_filter(self, mock_exec_primary_cmd): } ) nodes = cluster.get_nodes() - assert nodes == [] + assert not nodes @patch( "sos.collector.clusters.juju.juju._get_juju_version", @@ -79,6 +81,7 @@ def test_get_nodes_no_filter(self, mock_exec_primary_cmd): "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) + # pylint: disable=unused-argument def test_get_nodes_app_filter( self, mock_exec_primary_cmd, mock_get_juju_version ): @@ -113,6 +116,7 @@ def test_get_nodes_app_filter( "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) + # pylint: disable=unused-argument def test_get_nodes_app_regex_filter( self, mock_exec_primary_cmd, mock_get_juju_version ): @@ -147,6 +151,7 @@ def test_get_nodes_app_regex_filter( "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) + # pylint: disable=unused-argument def test_get_nodes_model_filter_multiple_models( self, mock_exec_primary_cmd, mock_get_juju_version ): @@ -198,6 +203,7 @@ def test_get_nodes_model_filter_multiple_models( "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) + # pylint: disable=unused-argument def test_get_nodes_model_filter( self, mock_exec_primary_cmd, mock_get_juju_version ): @@ -246,6 +252,7 @@ def test_get_nodes_model_filter( "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) + # pylint: disable=unused-argument def test_get_nodes_unit_filter( self, mock_exec_primary_cmd, mock_get_juju_version ): @@ -277,6 +284,7 @@ def test_get_nodes_unit_filter( "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) + # pylint: disable=unused-argument def test_get_nodes_machine_filter( self, mock_exec_primary_cmd, mock_get_juju_version ): @@ -309,6 +317,7 @@ def test_get_nodes_machine_filter( "sos.collector.clusters.juju.juju.exec_primary_cmd", side_effect=get_juju_status, ) + # pylint: disable=unused-argument def test_subordinates(self, mock_exec_primary_cmd, mock_get_juju_version): """Subordinate filter.""" mock_opts = MockOptions() diff --git a/tests/unittests/juju/juju_transports_test.py b/tests/unittests/juju/juju_transports_test.py index 911a957e43..f734c70490 100644 --- a/tests/unittests/juju/juju_transports_test.py +++ b/tests/unittests/juju/juju_transports_test.py @@ -16,7 +16,7 @@ from 
sos.collector.transports.juju import JujuSSH
 
 
-class MockCmdLineOpts(object):
+class MockCmdLineOpts:
     ssh_user = "user_abc"
     sudo_pw = "pw_abc"
     root_password = "root_pw_abc"
@@ -43,6 +43,7 @@ def test_check_juju_installed_err(self, mock_subprocess_check_output):
             self.juju_ssh._check_juju_installed()
 
     @patch("sos.collector.transports.juju.subprocess.check_output")
+    # pylint: disable=unused-argument
     def test_check_juju_installed_true(self, mock_subprocess_check_output):
         """Return True if juju is installed."""
         result = self.juju_ssh._check_juju_installed()
@@ -61,6 +62,7 @@ def test_chmod(self, mock_subprocess_check_output):
         "sos.collector.transports.juju.JujuSSH._check_juju_installed",
         return_value=True,
     )
+    # pylint: disable=unused-argument
     def test_connect(self, mock_result):
         self.juju_ssh.connect(password=None)
         assert self.juju_ssh.connected
@@ -75,6 +77,7 @@ def test_remote_exec(self):
         return_value={"status": 0},
     )
     @patch("sos.collector.transports.juju.JujuSSH._chmod", return_value=True)
+    # pylint: disable=unused-argument
     def test_retrieve_file(self, mock_chmod, mock_sos_get_cmd_output):
         self.juju_ssh._retrieve_file(fname="file_abc", dest="/tmp/sos-juju/")
         mock_sos_get_cmd_output.assert_called_with(
diff --git a/tests/unittests/option_tests.py b/tests/unittests/option_tests.py
index a1fa480717..5b2c326e29 100644
--- a/tests/unittests/option_tests.py
+++ b/tests/unittests/option_tests.py
@@ -12,7 +12,7 @@ from sos.policies.init_systems import InitSystem
 
 
-class MockOptions(object):
+class MockOptions:
     all_logs = False
     dry_run = False
     log_size = 25
diff --git a/tests/unittests/plugin_tests.py b/tests/unittests/plugin_tests.py
index d659fb3dbb..69c4a9241a 100644
--- a/tests/unittests/plugin_tests.py
+++ b/tests/unittests/plugin_tests.py
@@ -26,18 +26,19 @@ def j(filename):
     return os.path.join(PATH, filename)
 
 
-def create_file(size, dir=None):
-    f = tempfile.NamedTemporaryFile(delete=False, dir=dir, mode='w')
-    fsize = size * 1024 * 1024
-    content = ''.join(random.choice(ascii_lowercase) for x in range(fsize))
-    f.write(content)
-    f.flush()
-    f.close()
-    return f.name
+def create_file(size, dirname=None):
+    with tempfile.NamedTemporaryFile(delete=False, dir=dirname,
+                                     mode='w') as f:
+        fsize = size * 1024 * 1024
+        content = ''.join(random.choice(ascii_lowercase)
+                          for _ in range(fsize))
+        f.write(content)
+        f.flush()
+        return f.name
 
 
 class MockArchive(TarFileArchive):
 
+    # pylint: disable=super-init-not-called
     def __init__(self):
         self.m = {}
         self.strings = {}
@@ -50,14 +51,14 @@ def add_file(self, src, dest=None, force=False):
             dest = src
         self.m[src] = dest
 
-    def add_string(self, content, dest):
+    def add_string(self, content, dest, mode='w'):
         self.m[dest] = content
 
-    def add_link(self, dest, link_name):
+    def add_link(self, source, link_name):
         pass
 
-    def open_file(self, name):
-        return open(self.m.get(name), 'r')
+    def open_file(self, path):
+        return open(self.m.get(path), 'r', encoding='utf-8')
 
     def close(self):
         pass
@@ -110,11 +111,12 @@ def setup(self):
 
 class EnablerPlugin(Plugin):
 
-    def is_installed(self, pkg):
+    # pylint: disable=unused-argument
+    def is_installed(self, package_name):
         return self.is_installed
 
 
-class MockOptions(object):
+class MockOptions:
     all_logs = False
     dry_run = False
     since = None
@@ -129,14 +131,14 @@ class MockOptions(object):
 class PluginToolTests(unittest.TestCase):
 
     def test_regex_findall(self):
-        test_s = u"\n".join(
+        test_s = "\n".join(
             ['this is only a test', 'there are only two lines'])
         test_fo = StringIO(test_s)
         matches = regex_findall(r".*lines$",
test_fo) self.assertEqual(matches, ['there are only two lines']) def test_regex_findall_miss(self): - test_s = u"\n".join( + test_s = "\n".join( ['this is only a test', 'there are only two lines']) test_fo = StringIO(test_s) matches = regex_findall(r".*not_there$", test_fo) @@ -376,8 +378,8 @@ def test_glob_file(self): def test_glob_file_limit_no_limit(self): self.mp.sysroot = '/' tmpdir = tempfile.mkdtemp() - create_file(2, dir=tmpdir) - create_file(2, dir=tmpdir) + create_file(2, dirname=tmpdir) + create_file(2, dirname=tmpdir) self.mp.add_copy_spec(tmpdir + "/*") self.assertEqual(len(self.mp.copy_paths), 2) shutil.rmtree(tmpdir) @@ -385,11 +387,11 @@ def test_glob_file_limit_no_limit(self): def test_glob_file_over_limit(self): self.mp.sysroot = '/' tmpdir = tempfile.mkdtemp() - create_file(2, dir=tmpdir) - create_file(2, dir=tmpdir) + create_file(2, dirname=tmpdir) + create_file(2, dirname=tmpdir) self.mp.add_copy_spec(tmpdir + "/*", 1) self.assertEqual(len(self.mp._tail_files_list), 1) - fname, _size = self.mp._tail_files_list[0] + _, _size = self.mp._tail_files_list[0] self.assertEqual(1024 * 1024, _size) shutil.rmtree(tmpdir) @@ -429,8 +431,8 @@ def test_checks_for_package(self): def test_allows_bad_tuple(self): f = j("tail_test.txt") - self.mp.files = (f) - self.mp.packages = ('foo') + self.mp.files = f + self.mp.packages = 'foo' self.assertTrue(self.mp.check_enabled()) def test_enabled_by_default(self): diff --git a/tests/unittests/policy_tests.py b/tests/unittests/policy_tests.py index a64275b8cb..bdca6fcf35 100644 --- a/tests/unittests/policy_tests.py +++ b/tests/unittests/policy_tests.py @@ -21,10 +21,18 @@ class FauxPolicy(Policy): distro = "Faux" + @classmethod + def check(cls, remote=''): + return False + class FauxLinuxPolicy(LinuxPolicy): distro = "FauxLinux" + @classmethod + def check(cls, remote=''): + return False + @classmethod def set_forbidden_paths(cls): return ['/etc/secret'] diff --git a/tests/unittests/report_tests.py b/tests/unittests/report_tests.py index f9ea2daab0..1f4c400ba5 100644 --- a/tests/unittests/report_tests.py +++ b/tests/unittests/report_tests.py @@ -71,7 +71,7 @@ def setUp(self): self.section = Section(name="plugin") self.div = '\n' + PlainTextReport.PLUGDIVIDER self.pluglist = "Loaded Plugins:\n{pluglist}" - self.defaultheader = u''.join([ + self.defaultheader = ''.join([ self.pluglist.format(pluglist=" plugin"), self.div, "\nplugin\n" @@ -92,7 +92,7 @@ def test_two_sections(self): section2 = Section(name="second") self.report.add(section1, section2) - self.assertEqual(u''.join([ + self.assertEqual(''.join([ self.pluglist.format(pluglist=" first second"), self.div, "\nfirst", @@ -108,7 +108,7 @@ def test_command(self): self.section.add(cmd) self.report.add(self.section) - self.assertEqual(u''.join([ + self.assertEqual(''.join([ self.defaultheader, "- commands executed:\n * ls -al /foo/bar/baz" ]), @@ -119,7 +119,7 @@ def test_copied_file(self): self.section.add(cf) self.report.add(self.section) - self.assertEqual(u''.join([ + self.assertEqual(''.join([ self.defaultheader, "- files copied:\n * /etc/hosts" ]), @@ -131,7 +131,7 @@ def test_created_file(self): self.section.add(crf) self.report.add(self.section) - self.assertEqual(u''.join([ + self.assertEqual(''.join([ self.defaultheader, "- files created:\n * sample.txt" ]), @@ -142,7 +142,7 @@ def test_alert(self): self.section.add(alrt) self.report.add(self.section) - self.assertEqual(u''.join([ + self.assertEqual(''.join([ self.defaultheader, "- alerts:\n ! 
this is an alert" ]), diff --git a/tests/unittests/utilities_tests.py b/tests/unittests/utilities_tests.py index 4a9ef5ec7c..d8d5bed5f4 100644 --- a/tests/unittests/utilities_tests.py +++ b/tests/unittests/utilities_tests.py @@ -20,7 +20,7 @@ class GrepTest(unittest.TestCase): def test_file_obj(self): - test_s = u"\n".join( + test_s = "\n".join( ['this is only a test', 'there are only two lines']) test_fo = StringIO(test_s) matches = grep(".*test$", test_fo) @@ -31,8 +31,9 @@ def test_real_file(self): self.assertEqual(matches, ['import unittest\n']) def test_open_file(self): - matches = grep(".*unittest$", open(__file__.replace(".pyc", ".py"))) - self.assertEqual(matches, ['import unittest\n']) + with open(__file__.replace(".pyc", ".py"), encoding='utf-8') as f: + matches = grep(".*unittest$", f) + self.assertEqual(matches, ['import unittest\n']) def test_grep_multiple_files(self): matches = grep(".*unittest$", @@ -48,8 +49,9 @@ def test_tail(self): def test_tail_too_many(self): t = tail("tests/unittests/tail_test.txt", 200) - expected = open("tests/unittests/tail_test.txt", "r").read() - self.assertEqual(t, str.encode(expected)) + with open("tests/unittests/tail_test.txt", "r", + encoding='utf-8') as expected: + self.assertEqual(t, str.encode(expected.read())) class ExecutableTest(unittest.TestCase): diff --git a/tests/vendor_tests/redhat/rhbz1928628.py b/tests/vendor_tests/redhat/rhbz1928628.py index 6e473e7c06..fa71ff5d9a 100644 --- a/tests/vendor_tests/redhat/rhbz1928628.py +++ b/tests/vendor_tests/redhat/rhbz1928628.py @@ -7,7 +7,8 @@ # See the LICENSE file in the source distribution for further information. -from report_tests.plugin_tests.networking import NetworkingPluginTest +from tests.report_tests.plugin_tests.networking.networking import \ + NetworkingPluginTest class rhbz1928628(NetworkingPluginTest):