diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 63f1aacb979d..6f635edbfa47 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -267,7 +267,7 @@ jobs:
           - NAME: postgres
             CFG: compile-gcc
             COMPILER: gcc
-            TEST_DB_PROVIDER: postgres
+            TEST_DB_PROVIDER: system-postgres
             TEST_NETWORK: regtest
           # And don't forget about elements (like cdecker did when
           # reworking the CI...)
@@ -290,10 +290,46 @@ jobs:
             COMPILER: gcc
             TEST_NETWORK: regtest
             EXPERIMENTAL_SPLICING: 1

+    services:
+      postgres:
+        # Docker Hub image
+        image: postgres
+        # Provide the password for postgres
+        env:
+          POSTGRES_PASSWORD: postgres
+          POSTGRES_INITDB_ARGS: "-c max_connections=2000"
+        ports:
+          - "5432:5432"
+        # Set health checks to wait until postgres has started
+        options: >-
+          --health-cmd pg_isready
+          --health-interval 10s
+          --health-timeout 5s
+          --health-retries 5
     steps:
       - name: Checkout
         uses: actions/checkout@v4
+      - name: Check disk space pre-cleanup
+        run: df -h
+      - name: Free up disk space
+        # We need more space than GHA usually provides when testing
+        # against the `postgres` service. This step frees some space
+        # on the volume containing tools, in the hope of freeing
+        # enough space for postgres tests to succeed.
+        uses: jlumbroso/free-disk-space@main
+        with:
+          # Enable all cleanup options
+          tool-cache: true
+          android: true
+          dotnet: true
+          haskell: true
+          swift: true
+          golang: true
+          php: true
+      - name: Check disk space post-cleanup
+        run: df -h
+
       - name: Set up Python 3.10
         uses: actions/setup-python@v5
         with:
@@ -340,6 +376,8 @@ jobs:
           TEST_DB_PROVIDER: ${{ matrix.TEST_DB_PROVIDER }}
           TEST_NETWORK: ${{ matrix.TEST_NETWORK }}
           LIGHTNINGD_POSTGRES_NO_VACUUM: 1
+          # Yes, that DSN is silly :-) It's only used when TEST_DB_PROVIDER=system-postgres
+          CLN_TEST_POSTGRES_DSN: "postgres://postgres:postgres@127.0.0.1:5432/postgres"
         run: |
           env
           cat config.vars
diff --git a/contrib/pyln-testing/pyln/testing/db.py b/contrib/pyln-testing/pyln/testing/db.py
index 7930e6a37d71..fc716e4d43f8 100644
--- a/contrib/pyln-testing/pyln/testing/db.py
+++ b/contrib/pyln-testing/pyln/testing/db.py
@@ -12,6 +12,7 @@
 import subprocess
 import time
 from typing import Dict, List, Optional, Union
+from urllib.parse import urlparse


 class BaseDb(object):
@@ -25,11 +26,18 @@ def __init__(self, path: str) -> None:
         self.provider = None

     def get_dsn(self) -> None:
-        """SQLite3 doesn't provide a DSN, resulting in no CLI-option.
-        """
+        """SQLite3 doesn't provide a DSN, resulting in no CLI-option."""
         return None

-    def query(self, query: str) -> Union[List[Dict[str, Union[int, bytes]]], List[Dict[str, Optional[int]]], List[Dict[str, str]], List[Dict[str, Union[str, int]]], List[Dict[str, int]]]:
+    def query(
+        self, query: str
+    ) -> Union[
+        List[Dict[str, Union[int, bytes]]],
+        List[Dict[str, Optional[int]]],
+        List[Dict[str, str]],
+        List[Dict[str, Union[str, int]]],
+        List[Dict[str, int]],
+    ]:
         orig = os.path.join(self.path)
         copy = self.path + ".copy"
         shutil.copyfile(orig, copy)
@@ -68,22 +76,23 @@ def wipe_db(self):


 class PostgresDb(BaseDb):
-    def __init__(self, dbname, port):
+    def __init__(self, dbname, hostname, port, username, password):
         self.dbname = dbname
         self.port = port
         self.provider = None
+        self.hostname = hostname
+        self.username = username
+        self.password = password

-        self.conn = psycopg2.connect("dbname={dbname} user=postgres host=localhost port={port}".format(
-            dbname=dbname, port=port
-        ))
+        self.conn = psycopg2.connect(
+            f"dbname={dbname} user={username} password={password} host={hostname} port={port}"
+        )
         cur = self.conn.cursor()
-        cur.execute('SELECT 1')
+        cur.execute("SELECT 1")
         cur.close()

     def get_dsn(self):
-        return "postgres://postgres:password@localhost:{port}/{dbname}".format(
-            port=self.port, dbname=self.dbname
-        )
+        return f"postgres://{self.username}:{self.password}@{self.hostname}:{self.port}/{self.dbname}"

     def query(self, query):
         cur = self.conn.cursor()
@@ -105,13 +114,15 @@ def execute(self, query):
         cur.execute(query)

     def stop(self):
-        """Clean up the database.
-        """
+        """Clean up the database."""
         self.conn.close()
-        conn = psycopg2.connect("dbname=postgres user=postgres host=localhost port={self.port}")
+        conn = psycopg2.connect(
+            f"dbname=postgres user={self.username} host={self.hostname} password={self.password} port={self.port}"
+        )
         cur = conn.cursor()
         cur.execute("DROP DATABASE {};".format(self.dbname))
         cur.close()
+        conn.close()

     def wipe_db(self):
         cur = self.conn.cursor()
@@ -128,10 +139,7 @@ def start(self) -> None:
         pass

     def get_db(self, node_directory: str, testname: str, node_id: int) -> Sqlite3Db:
-        path = os.path.join(
-            node_directory,
-            'lightningd.sqlite3'
-        )
+        path = os.path.join(node_directory, "lightningd.sqlite3")
         return Sqlite3Db(path)

     def stop(self) -> None:
@@ -143,69 +151,92 @@ def __init__(self, directory):
         self.directory = directory
         self.port = None
         self.proc = None
+        self.hostname = "127.0.0.1"  # We default to localhost, but can be overridden
+        self.username = "postgres"
+        self.password = "postgres"
+        print("Starting PostgresDbProvider")

     def locate_path(self):
         # Use `pg_config` to determine correct PostgreSQL installation
-        pg_config = shutil.which('pg_config')
+        pg_config = shutil.which("pg_config")
         if not pg_config:
-            raise ValueError("Could not find `pg_config` to determine PostgreSQL binaries. Is PostgreSQL installed?")
+            raise ValueError(
+                "Could not find `pg_config` to determine PostgreSQL binaries. Is PostgreSQL installed?"
+            )

-        bindir = subprocess.check_output([pg_config, '--bindir']).decode().rstrip()
+        bindir = subprocess.check_output([pg_config, "--bindir"]).decode().rstrip()
         if not os.path.isdir(bindir):
-            raise ValueError("Error: `pg_config --bindir` didn't return a proper path: {}".format(bindir))
-
-        initdb = os.path.join(bindir, 'initdb')
-        postgres = os.path.join(bindir, 'postgres')
+            raise ValueError(
+                "Error: `pg_config --bindir` didn't return a proper path: {}".format(
+                    bindir
+                )
+            )
+
+        initdb = os.path.join(bindir, "initdb")
+        postgres = os.path.join(bindir, "postgres")

         if os.path.isfile(initdb) and os.path.isfile(postgres):
             if os.access(initdb, os.X_OK) and os.access(postgres, os.X_OK):
                 logging.info("Found `postgres` and `initdb` in {}".format(bindir))
                 return initdb, postgres

-        raise ValueError("Could not find `postgres` and `initdb` binaries in {}".format(bindir))
+        raise ValueError(
+            "Could not find `postgres` and `initdb` binaries in {}".format(bindir)
+        )

     def start(self):
         passfile = os.path.join(self.directory, "pgpass.txt")
         # Need to write a tiny file containing the password so `initdb` can
         # pick it up
-        with open(passfile, 'w') as f:
-            f.write('cltest\n')
+        with open(passfile, "w") as f:
+            f.write("cltest\n")

         # Look for a postgres directory that isn't taken yet. Not locking
         # since this is run in a single-threaded context, at the start of each
         # test. Multiple workers have separate directories, so they can't
         # trample each other either.
         for i in itertools.count():
-            self.pgdir = os.path.join(self.directory, 'pgsql-{}'.format(i))
+            self.pgdir = os.path.join(self.directory, "pgsql-{}".format(i))
             if not os.path.exists(self.pgdir):
                 break

         initdb, postgres = self.locate_path()
-        subprocess.check_call([
-            initdb,
-            '--pwfile={}'.format(passfile),
-            '--pgdata={}'.format(self.pgdir),
-            '--auth=trust',
-            '--username=postgres',
-        ])
-        conffile = os.path.join(self.pgdir, 'postgresql.conf')
-        with open(conffile, 'a') as f:
-            f.write('max_connections = 1000\nshared_buffers = 240MB\n')
+        subprocess.check_call(
+            [
+                initdb,
+                "--pwfile={}".format(passfile),
+                "--pgdata={}".format(self.pgdir),
+                "--auth=trust",
+                "--username=postgres",
+            ]
+        )
+        conffile = os.path.join(self.pgdir, "postgresql.conf")
+        with open(conffile, "a") as f:
+            f.write("max_connections = 1000\nshared_buffers = 240MB\n")

         self.port = reserve_unused_port()

-        self.proc = subprocess.Popen([
-            postgres,
-            '-k', '/tmp/',  # So we don't use /var/lib/...
-            '-D', self.pgdir,
-            '-p', str(self.port),
-            '-F',
-            '-i',
-        ])
+        self.proc = subprocess.Popen(
+            [
+                postgres,
+                "-k",
+                "/tmp/",  # So we don't use /var/lib/...
+                "-D",
+                self.pgdir,
+                "-p",
+                str(self.port),
+                "-F",
+                "-i",
+            ]
+        )
         # Hacky but seems to work ok (might want to make the postgres proc a
         # TailableProc as well if too flaky).
         for i in range(30):
             try:
-                self.conn = psycopg2.connect("dbname=template1 user=postgres host=localhost port={}".format(self.port))
+                self.conn = psycopg2.connect(
+                    "dbname=template1 user=postgres host=localhost port={}".format(
+                        self.port
+                    )
+                )
                 break
             except Exception:
                 time.sleep(0.5)
@@ -215,13 +246,15 @@ def start(self):

     def get_db(self, node_directory, testname, node_id):
         # Random suffix to avoid collisions on repeated tests
-        nonce = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
+        nonce = "".join(
+            random.choice(string.ascii_lowercase + string.digits) for _ in range(8)
+        )
         dbname = "{}_{}_{}".format(testname, node_id, nonce)

         cur = self.conn.cursor()
         cur.execute("CREATE DATABASE {};".format(dbname))
         cur.close()
-        db = PostgresDb(dbname, self.port)
+        db = PostgresDb(dbname, self.hostname, self.port, self.username, self.password)
         return db

     def stop(self):
@@ -241,3 +274,49 @@ def stop(self):
         self.proc.wait()
         shutil.rmtree(self.pgdir)
         drop_unused_port(self.port)
+
+
+class SystemPostgresDbProvider(PostgresDbProvider):
+    """A DB provider that uses an externally controlled postgres instance.
+
+    Spinning postgres instances up and down is costly. We are keeping
+    tests separate by assigning them random names, so we can share a
+    single DB cluster. This provider does just that: it talks to an
+    externally managed cluster, creates and deletes DBs on demand, but
+    does not manage the cluster's lifecycle.
+
+    The external cluster to talk to can be specified via the
+    `CLN_TEST_POSTGRES_DSN` environment variable.
+
+    Please make sure that the user specified in the DSN has the
+    permission to create new DBs.
+
+    Since tests may end up interrupted and may not clean up the
+    databases they created, be aware that over time your cluster may
+    accumulate quite a few databases. This mode is mostly intended for
+    CI, where a throwaway postgres cluster can be spun up and tested
+    against.
+
+    """
+
+    def __init__(self, directory):
+        self.dsn = os.environ.get("CLN_TEST_POSTGRES_DSN")
+        self.conn = None
+        parts = urlparse(self.dsn)
+
+        self.hostname = parts.hostname
+        self.username = parts.username
+        self.password = parts.password if parts.password else ""
+        self.port = parts.port if parts.port else 5432
+        self.dbname = parts.path
+
+    def stop(self):
+        pass
+
+    def start(self):
+        self.conn = psycopg2.connect(self.dsn)
+        cur = self.conn.cursor()
+        cur.execute("SELECT 1")
+        cur.close()
+        # Required for CREATE DATABASE to work
+        self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
diff --git a/contrib/pyln-testing/pyln/testing/fixtures.py b/contrib/pyln-testing/pyln/testing/fixtures.py
index c952c97bfdda..6071b334b065 100644
--- a/contrib/pyln-testing/pyln/testing/fixtures.py
+++ b/contrib/pyln-testing/pyln/testing/fixtures.py
@@ -1,6 +1,18 @@
 from concurrent import futures
-from pyln.testing.db import SqliteDbProvider, PostgresDbProvider
-from pyln.testing.utils import NodeFactory, BitcoinD, ElementsD, env, LightningNode, TEST_DEBUG, TEST_NETWORK
+from pyln.testing.db import (
+    SqliteDbProvider,
+    PostgresDbProvider,
+    SystemPostgresDbProvider,
+)
+from pyln.testing.utils import (
+    NodeFactory,
+    BitcoinD,
+    ElementsD,
+    env,
+    LightningNode,
+    TEST_DEBUG,
+    TEST_NETWORK,
+)
 from pyln.client import Millisatoshi
 from typing import Dict

@@ -27,7 +39,7 @@ def test_base_dir():
     d = os.getenv("TEST_DIR", "/tmp")

-    directory = tempfile.mkdtemp(prefix='ltests-', dir=d)
+    directory = tempfile.mkdtemp(prefix="ltests-", dir=d)
     print("Running tests in {}".format(directory))

     yield directory

@@ -35,13 +47,19 @@ def test_base_dir():
     # Now check if any test directory is left because the corresponding test
     # failed. If there are no such tests we can clean up the root test
     # directory.
-    contents = [d for d in os.listdir(directory) if os.path.isdir(os.path.join(directory, d)) and d.startswith('test_')]
+    contents = [
+        d
+        for d in os.listdir(directory)
+        if os.path.isdir(os.path.join(directory, d)) and d.startswith("test_")
+    ]
     if contents == []:
         shutil.rmtree(directory)
     else:
-        print("Leaving base_dir {} intact, it still has test sub-directories with failure details: {}".format(
-            directory, contents
-        ))
+        print(
+            "Leaving base_dir {} intact, it still has test sub-directories with failure details: {}".format(
+                directory, contents
+            )
+        )


 @pytest.fixture(autouse=True)
@@ -62,7 +80,7 @@ def setup_logging():
     loggers = [logging.getLogger()] + list(logging.Logger.manager.loggerDict.values())

     for logger in loggers:
-        handlers = getattr(logger, 'handlers', [])
+        handlers = getattr(logger, "handlers", [])
         for handler in handlers:
             logger.removeHandler(handler)

@@ -77,7 +95,9 @@ def directory(request, test_base_dir, test_name):
     global __attempts
     # Auto set value if it isn't in the dict yet
     __attempts[test_name] = __attempts.get(test_name, 0) + 1
-    directory = os.path.join(test_base_dir, "{}_{}".format(test_name, __attempts[test_name]))
+    directory = os.path.join(
+        test_base_dir, "{}_{}".format(test_name, __attempts[test_name])
+    )
     request.node.has_errors = False

     if not os.path.exists(directory):
@@ -89,9 +109,9 @@ def directory(request, test_base_dir, test_name):
     # determine whether we succeeded or failed. Outcome can be None if the
     # failure occurs during the setup phase, hence the use to getattr instead
     # of accessing it directly.
-    rep_call = getattr(request.node, 'rep_call', None)
-    outcome = 'passed' if rep_call is None else rep_call.outcome
-    failed = not outcome or request.node.has_errors or outcome != 'passed'
+    rep_call = getattr(request.node, "rep_call", None)
+    outcome = "passed" if rep_call is None else rep_call.outcome
+    failed = not outcome or request.node.has_errors or outcome != "passed"

     if not failed:
         try:
@@ -99,13 +119,19 @@ def directory(request, test_base_dir, test_name):
         except OSError:
             # Usually, this means that e.g. valgrind is still running. Wait
             # a little and retry.
-            files = [os.path.join(dp, f) for dp, dn, fn in os.walk(directory) for f in fn]
+            files = [
+                os.path.join(dp, f) for dp, dn, fn in os.walk(directory) for f in fn
+            ]
             print("Directory still contains files: ", files)
             print("... sleeping then retrying")
             time.sleep(10)
             shutil.rmtree(directory)
     else:
-        logging.debug("Test execution failed, leaving the test directory {} intact.".format(directory))
+        logging.debug(
+            "Test execution failed, leaving the test directory {} intact.".format(
+                directory
+            )
+        )


 @pytest.fixture
@@ -114,8 +140,8 @@ def test_name(request):


 network_daemons = {
-    'regtest': BitcoinD,
-    'liquid-regtest': ElementsD,
+    "regtest": BitcoinD,
+    "liquid-regtest": ElementsD,
 }


@@ -126,7 +152,7 @@ def node_cls():

 @pytest.fixture
 def bitcoind(directory, teardown_checks):
-    chaind = network_daemons[env('TEST_NETWORK', 'regtest')]
+    chaind = network_daemons[env("TEST_NETWORK", "regtest")]
     bitcoind = chaind(bitcoin_dir=directory)

     try:
@@ -139,20 +165,24 @@ def bitcoind(directory, teardown_checks):

     # FIXME: include liquid-regtest in this check after elementsd has been
     # updated
-    if info['version'] < 200100 and env('TEST_NETWORK') != 'liquid-regtest':
+    if info["version"] < 200100 and env("TEST_NETWORK") != "liquid-regtest":
         bitcoind.rpc.stop()
-        raise ValueError("bitcoind is too old. At least version 20100 (v0.20.1)"
-                         " is needed, current version is {}".format(info['version']))
-    elif info['version'] < 160000:
+        raise ValueError(
+            "bitcoind is too old. At least version 20100 (v0.20.1)"
+            " is needed, current version is {}".format(info["version"])
+        )
+    elif info["version"] < 160000:
         bitcoind.rpc.stop()
-        raise ValueError("elementsd is too old. At least version 160000 (v0.16.0)"
-                         " is needed, current version is {}".format(info['version']))
+        raise ValueError(
+            "elementsd is too old. At least version 160000 (v0.16.0)"
+            " is needed, current version is {}".format(info["version"])
+        )

     info = bitcoind.rpc.getblockchaininfo()
     # Make sure we have some spendable funds
-    if info['blocks'] < 101:
-        bitcoind.generate_block(101 - info['blocks'])
-    elif bitcoind.rpc.getwalletinfo()['balance'] < 1:
+    if info["blocks"] < 101:
+        bitcoind.generate_block(101 - info["blocks"])
+    elif bitcoind.rpc.getwalletinfo()["balance"] < 1:
         logging.debug("Insufficient balance, generating 1 block")
         bitcoind.generate_block(1)

@@ -211,6 +241,7 @@ def teardown_checks(request):

 def _extra_validator(is_request: bool):
     """JSON Schema validator with additions for our specialized types"""
+
     def is_hex(checker, instance):
         """Hex string"""
         if not checker.is_type(instance, "string"):
@@ -265,9 +296,14 @@ def is_short_channel_id(checker, instance):
         # 2. the next 3 bytes: indicating the transaction index within the block
         # 3. the least significant 2 bytes: indicating the output index that pays to the
         # channel.
-        return (blocknum >= 0 and blocknum < 2**24
-                and txnum >= 0 and txnum < 2**24
-                and outnum >= 0 and outnum < 2**16)
+        return (
+            blocknum >= 0
+            and blocknum < 2**24
+            and txnum >= 0
+            and txnum < 2**24
+            and outnum >= 0
+            and outnum < 2**16
+        )

     def is_short_channel_id_dir(checker, instance):
         """Short channel id with direction"""
@@ -300,7 +336,16 @@ def is_feerate(checker, instance):
             return False
         if instance in ("urgent", "normal", "slow", "minimum"):
             return True
-        if instance in ("opening", "mutual_close", "unilateral_close", "delayed_to_us", "htlc_resolution", "penalty", "min_acceptable", "max_acceptable"):
+        if instance in (
+            "opening",
+            "mutual_close",
+            "unilateral_close",
+            "delayed_to_us",
+            "htlc_resolution",
+            "penalty",
+            "min_acceptable",
+            "max_acceptable",
+        ):
             return True
         if not instance.endswith("perkw") and not instance.endswith("perkb"):
             return False
@@ -402,7 +447,7 @@ def is_sat_or_all(checker, instance):

     def is_currency(checker, instance):
         """currency including currency code"""
-        pattern = re.compile(r'^\d+(\.\d+)?[A-Z][A-Z][A-Z]$')
+        pattern = re.compile(r"^\d+(\.\d+)?[A-Z][A-Z][A-Z]$")
         if checker.is_type(instance, "string") and pattern.match(instance):
             return True
         return False
@@ -412,61 +457,77 @@ def is_currency(checker, instance):
         is_msat = is_msat_request
     else:
         is_msat = is_msat_response
-    type_checker = jsonschema.Draft7Validator.TYPE_CHECKER.redefine_many({
-        "hex": is_hex,
-        "hash": is_32byte_hex,
-        "secret": is_32byte_hex,
-        "u64": is_u64,
-        "u32": is_u32,
-        "u16": is_u16,
-        "u8": is_u8,
-        "pubkey": is_pubkey,
-        "sat": is_sat,
-        "sat_or_all": is_sat_or_all,
-        "msat": is_msat,
-        "msat_or_all": is_msat_or_all,
-        "msat_or_any": is_msat_or_any,
-        "currency": is_currency,
-        "txid": is_txid,
-        "signature": is_signature,
-        "bip340sig": is_bip340sig,
-        "short_channel_id": is_short_channel_id,
-        "short_channel_id_dir": is_short_channel_id_dir,
-        "outpoint": is_outpoint,
-        "feerate": is_feerate,
-        "outputdesc": is_outputdesc,
-    })
-
-    return jsonschema.validators.extend(jsonschema.Draft7Validator,
-                                        type_checker=type_checker)
+    type_checker = jsonschema.Draft7Validator.TYPE_CHECKER.redefine_many(
+        {
+            "hex": is_hex,
+            "hash": is_32byte_hex,
+            "secret": is_32byte_hex,
+            "u64": is_u64,
+            "u32": is_u32,
+            "u16": is_u16,
+            "u8": is_u8,
+            "pubkey": is_pubkey,
+            "sat": is_sat,
+            "sat_or_all": is_sat_or_all,
+            "msat": is_msat,
+            "msat_or_all": is_msat_or_all,
+            "msat_or_any": is_msat_or_any,
+            "currency": is_currency,
+            "txid": is_txid,
+            "signature": is_signature,
+            "bip340sig": is_bip340sig,
+            "short_channel_id": is_short_channel_id,
+            "short_channel_id_dir": is_short_channel_id_dir,
+            "outpoint": is_outpoint,
+            "feerate": is_feerate,
+            "outputdesc": is_outputdesc,
+        }
+    )
+
+    return jsonschema.validators.extend(
+        jsonschema.Draft7Validator, type_checker=type_checker
+    )


 def _load_schema(filename):
     """Load the schema from @filename and create a validator for it"""
-    with open(filename, 'r') as f:
+    with open(filename, "r") as f:
         data = json.load(f)
-        return [_extra_validator(True)(data.get('request', {})), _extra_validator(False)(data.get('response', {}))]
+        return [
+            _extra_validator(True)(data.get("request", {})),
+            _extra_validator(False)(data.get("response", {})),
+        ]


 @pytest.fixture(autouse=True)
 def jsonschemas():
     """Load schema file if it exist: returns request/response schemas by pairs"""
     try:
-        schemafiles = os.listdir('doc/schemas')
+        schemafiles = os.listdir("doc/schemas")
     except FileNotFoundError:
         schemafiles = []

     schemas = {}
     for fname in schemafiles:
-        if fname.endswith('.json'):
-            base = fname.replace('lightning-', '').replace('.json', '')
+        if fname.endswith(".json"):
+            base = fname.replace("lightning-", "").replace(".json", "")
             # Request is 0 and Response is 1
-            schemas[base] = _load_schema(os.path.join('doc/schemas', fname))
+            schemas[base] = _load_schema(os.path.join("doc/schemas", fname))

     return schemas


 @pytest.fixture
-def node_factory(request, directory, test_name, bitcoind, executor, db_provider, teardown_checks, node_cls, jsonschemas):
+def node_factory(
+    request,
+    directory,
+    test_name,
+    bitcoind,
+    executor,
+    db_provider,
+    teardown_checks,
+    node_cls,
+    jsonschemas,
+):
     nf = NodeFactory(
         request,
         test_name,
@@ -495,7 +556,11 @@ def map_node_error(nodes, f, msg):
     map_node_error(nf.nodes, printValgrindErrors, "reported valgrind errors")
     map_node_error(nf.nodes, printCrashLog, "had crash.log files")
     map_node_error(nf.nodes, checkBroken, "had BROKEN messages")
-    map_node_error(nf.nodes, lambda n: not n.allow_warning and n.daemon.is_in_log(r' WARNING:'), "had warning messages")
+    map_node_error(
+        nf.nodes,
+        lambda n: not n.allow_warning and n.daemon.is_in_log(r" WARNING:"),
+        "had warning messages",
+    )
     map_node_error(nf.nodes, checkReconnect, "had unexpected reconnections")

     # On any bad gossip complaints, print out every nodes' gossip_store
@@ -503,20 +568,36 @@ def map_node_error(nodes, f, msg):
     for n in nf.nodes:
         dumpGossipStore(n)

-    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log('Bad reestablish'), "had bad reestablish")
-    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log('bad hsm request'), "had bad hsm requests")
-    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log(r'Accessing a null column'), "Accessing a null column")
+    map_node_error(
+        nf.nodes, lambda n: n.daemon.is_in_log("Bad reestablish"), "had bad reestablish"
+    )
+    map_node_error(
+        nf.nodes,
+        lambda n: n.daemon.is_in_log("bad hsm request"),
+        "had bad hsm requests",
+    )
+    map_node_error(
+        nf.nodes,
+        lambda n: n.daemon.is_in_log(r"Accessing a null column"),
+        "Accessing a null column",
+    )
     map_node_error(nf.nodes, checkMemleak, "had memleak messages")
-    map_node_error(nf.nodes, lambda n: n.rc != 0 and not n.may_fail, "Node exited with return code {n.rc}")
+    map_node_error(
+        nf.nodes,
+        lambda n: n.rc != 0 and not n.may_fail,
+        "Node exited with return code {n.rc}",
+    )
     if not ok:
-        map_node_error(nf.nodes, prinErrlog, "some node failed unexpected, non-empty errlog file")
+        map_node_error(
+            nf.nodes, prinErrlog, "some node failed unexpected, non-empty errlog file"
+        )


 def getErrlog(node):
     for error_file in os.listdir(node.daemon.lightning_dir):
         if not re.fullmatch(r"errlog", error_file):
             continue
-        with open(os.path.join(node.daemon.lightning_dir, error_file), 'r') as f:
+        with open(os.path.join(node.daemon.lightning_dir, error_file), "r") as f:
             errors = f.read().strip()
             if errors:
                 return errors, error_file
@@ -526,7 +607,11 @@ def getErrlog(node):
 def prinErrlog(node):
     errors, fname = getErrlog(node)
     if errors:
-        print("-" * 31, "stderr of node {} captured in {} file".format(node.daemon.prefix, fname), "-" * 32)
+        print(
+            "-" * 31,
+            "stderr of node {} captured in {} file".format(node.daemon.prefix, fname),
+            "-" * 32,
+        )
         print(errors)
         print("-" * 80)
     return 1 if errors else 0
@@ -536,7 +621,7 @@ def getValgrindErrors(node):
     for error_file in os.listdir(node.daemon.lightning_dir):
         if not re.fullmatch(r"valgrind-errors.\d+", error_file):
             continue
-        with open(os.path.join(node.daemon.lightning_dir, error_file), 'r') as f:
+        with open(os.path.join(node.daemon.lightning_dir, error_file), "r") as f:
             errors = f.read().strip()
             if errors:
                 return errors, error_file
@@ -557,8 +642,8 @@ def getCrashLog(node):
     if node.may_fail:
         return None, None
     try:
-        crashlog = os.path.join(node.daemon.lightning_dir, 'crash.log')
-        with open(crashlog, 'r') as f:
+        crashlog = os.path.join(node.daemon.lightning_dir, "crash.log")
+        with open(crashlog, "r") as f:
             return f.readlines(), crashlog
     except Exception:
         return None, None
@@ -576,15 +661,17 @@ def printCrashLog(node):
 def checkReconnect(node):
     if node.may_reconnect:
         return 0
-    if node.daemon.is_in_log('Peer has reconnected'):
+    if node.daemon.is_in_log("Peer has reconnected"):
         return 1
     return 0


 def dumpGossipStore(node):
-    gs_path = os.path.join(node.daemon.lightning_dir, TEST_NETWORK, 'gossip_store')
-    gs = subprocess.run(['devtools/dump-gossipstore', '--print-deleted', gs_path],
-                        stdout=subprocess.PIPE)
+    gs_path = os.path.join(node.daemon.lightning_dir, TEST_NETWORK, "gossip_store")
+    gs = subprocess.run(
+        ["devtools/dump-gossipstore", "--print-deleted", gs_path],
+        stdout=subprocess.PIPE,
+    )
     print("GOSSIP STORE CONTENTS for {}:\n".format(node.daemon.prefix))
     print(gs.stdout.decode())

@@ -593,22 +680,22 @@ def checkBadGossip(node):
     if node.allow_bad_gossip:
         return 0
     # We can get bad gossip order from inside error msgs.
-    if node.daemon.is_in_log('Bad gossip order:'):
+    if node.daemon.is_in_log("Bad gossip order:"):
         # This can happen if a node sees a node_announce after a channel
         # is deleted, however.
-        if node.daemon.is_in_log('Deleting channel'):
+        if node.daemon.is_in_log("Deleting channel"):
             return 0
         return 1

     # Other 'Bad' messages shouldn't happen.
-    if node.daemon.is_in_log(r'gossipd.*Bad (?!gossip order from error)'):
+    if node.daemon.is_in_log(r"gossipd.*Bad (?!gossip order from error)"):
         return 1
     return 0


 def checkBroken(node):
     node.daemon.logs_catchup()
-    broken_lines = [l for l in node.daemon.logs if '**BROKEN**' in l]
+    broken_lines = [l for l in node.daemon.logs if "**BROKEN**" in l]
     if node.broken_log:
         ex = re.compile(node.broken_log)
         broken_lines = [l for l in broken_lines if not ex.search(l)]
@@ -619,33 +706,34 @@ def checkBroken(node):


 def checkBadReestablish(node):
-    if node.daemon.is_in_log('Bad reestablish'):
+    if node.daemon.is_in_log("Bad reestablish"):
         return 1
     return 0


 def checkBadHSMRequest(node):
-    if node.daemon.is_in_log('bad hsm request'):
+    if node.daemon.is_in_log("bad hsm request"):
         return 1
     return 0


 def checkMemleak(node):
-    if node.daemon.is_in_log('MEMLEAK:'):
+    if node.daemon.is_in_log("MEMLEAK:"):
         return 1
     return 0


 # Mapping from TEST_DB_PROVIDER env variable to class to be used
 providers = {
-    'sqlite3': SqliteDbProvider,
-    'postgres': PostgresDbProvider,
+    "sqlite3": SqliteDbProvider,
+    "postgres": PostgresDbProvider,
+    "system-postgres": SystemPostgresDbProvider,
 }


 @pytest.fixture
 def db_provider(test_base_dir):
-    provider = providers[os.getenv('TEST_DB_PROVIDER', 'sqlite3')](test_base_dir)
+    provider = providers[os.getenv("TEST_DB_PROVIDER", "sqlite3")](test_base_dir)
     provider.start()
     yield provider
     provider.stop()
@@ -662,29 +750,29 @@ def executor(teardown_checks):
 def chainparams():
     """Return the chainparams for the TEST_NETWORK.

-    - chain_hash is in network byte order, not the RPC return order.
-    - example_addr doesn't belong to any node in the test (randomly generated)
+    - chain_hash is in network byte order, not the RPC return order.
+    - example_addr doesn't belong to any node in the test (randomly generated)
     """
     chainparams = {
-        'regtest': {
+        "regtest": {
             "bip173_prefix": "bcrt",
             "elements": False,
             "name": "regtest",
-            "p2sh_prefix": '2',
+            "p2sh_prefix": "2",
             "example_addr": "bcrt1qeyyk6sl5pr49ycpqyckvmttus5ttj25pd0zpvg",
             "feeoutput": False,
-            "chain_hash": '06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f',
+            "chain_hash": "06226e46111a0b59caaf126043eb5bbf28c34f3a5e332a1fc7b2b73cf188910f",
         },
-        'liquid-regtest': {
+        "liquid-regtest": {
             "bip173_prefix": "ert",
             "elements": True,
             "name": "liquid-regtest",
-            "p2sh_prefix": 'X',
+            "p2sh_prefix": "X",
             "example_addr": "ert1qjsesxflhs3632syhcz7llpfx20p5tr0kpllfve",
             "feeoutput": True,
             "chain_hash": "9f87eb580b9e5f11dc211e9fb66abb3699999044f8fe146801162393364286c6",
-        }
+        },
     }

-    return chainparams[env('TEST_NETWORK', 'regtest')]
+    return chainparams[env("TEST_NETWORK", "regtest")]