diff --git a/node_cli/cli/node.py b/node_cli/cli/node.py index ff781249..fe826a58 100644 --- a/node_cli/cli/node.py +++ b/node_cli/cli/node.py @@ -239,12 +239,13 @@ def check(network): run_checks(network) -@node.command(help='Reconfigure iptables rules') +@node.command(help='Reconfigure nftables rules') +@click.option('--monitoring', is_flag=True) @click.option('--yes', is_flag=True, callback=abort_if_false, expose_value=False, prompt='Are you sure you want to reconfigure firewall rules?') -def configure_firewall(): - configure_firewall_rules() +def configure_firewall(monitoring): + configure_firewall_rules(enable_monitoring=monitoring) @node.command(help='Show node version information') diff --git a/node_cli/core/checks.py b/node_cli/core/checks.py index b4902b57..a5e85c70 100644 --- a/node_cli/core/checks.py +++ b/node_cli/core/checks.py @@ -422,7 +422,7 @@ def docker_compose(self) -> CheckResult: return self._failed(name=name, info=info) v_cmd_result = run_cmd( - ['docker compose', 'version'], + ['docker', 'compose', 'version'], check_code=False, separate_stderr=True ) diff --git a/node_cli/core/nftables.py b/node_cli/core/nftables.py index fe3dfc77..b7db5bb7 100644 --- a/node_cli/core/nftables.py +++ b/node_cli/core/nftables.py @@ -287,7 +287,7 @@ def add_loopback_rule(self, chain) -> None: else: logger.info('Loopback rule already exists in chain %s', chain) - def setup_firewall(self) -> None: + def setup_firewall(self, enable_monitoring: bool = False) -> None: """Setup firewall rules""" try: self.create_table_if_not_exists() @@ -306,6 +306,8 @@ def setup_firewall(self) -> None: self.add_connection_tracking_rule(self.chain) - tcp_ports = [get_ssh_port(), 8080, 443, 53, 3009, 9100] + tcp_ports = [get_ssh_port(), 443, 53, 3009] + if enable_monitoring: + tcp_ports.extend([8080, 9100]) for port in tcp_ports: self.add_rule_if_not_exists(Rule(chain=self.chain, protocol='tcp', port=port)) @@ -330,7 +332,7 @@ def setup_firewall(self) -> None: raise NFTablesError(e) -def configure_nftables() -> None: +def 
configure_nftables(enable_monitoring: bool = False) -> None: nft_mgr = NFTablesManager() - nft_mgr.setup_firewall() + nft_mgr.setup_firewall(enable_monitoring=enable_monitoring) logger.info('Firewall setup completed successfully') diff --git a/node_cli/core/node.py b/node_cli/core/node.py index a805cb8d..b728b5dc 100644 --- a/node_cli/core/node.py +++ b/node_cli/core/node.py @@ -39,16 +39,14 @@ SCHAINS_MNT_DIR_SYNC, SKALE_DIR, SKALE_STATE_DIR, - TM_INIT_TIMEOUT + TM_INIT_TIMEOUT, ) from node_cli.cli import __version__ from node_cli.configs.env import get_env_config from node_cli.configs.cli_logger import LOG_DATA_PATH as CLI_LOG_DATA_PATH from node_cli.core.nftables import configure_nftables -from node_cli.core.host import ( - is_node_inited, save_env_params, get_flask_secret_key -) +from node_cli.core.host import is_node_inited, save_env_params, get_flask_secret_key from node_cli.core.checks import run_checks as run_host_checks from node_cli.core.resources import update_resource_allocation from node_cli.operations import ( @@ -59,21 +57,24 @@ restore_op, init_sync_op, repair_sync_op, - update_sync_op + update_sync_op, ) from node_cli.utils.print_formatters import ( - print_failed_requirements_checks, print_node_cmd_error, print_node_info + print_failed_requirements_checks, + print_node_cmd_error, + print_node_info, +) +from node_cli.utils.helper import ( + error_exit, + get_request, + post_request, + extract_env_params, + str_to_bool, ) -from node_cli.utils.helper import error_exit, get_request, post_request -from node_cli.utils.helper import extract_env_params from node_cli.utils.meta import get_meta_info from node_cli.utils.texts import Texts from node_cli.utils.exit_codes import CLIExitCodes -from node_cli.utils.decorators import ( - check_not_inited, - check_inited, - check_user -) +from node_cli.utils.decorators import check_not_inited, check_inited, check_user from node_cli.migrations.focal_to_jammy import migrate as migrate_2_6 @@ -87,6 +88,7 @@ class 
NodeStatuses(Enum): """This class contains possible node statuses""" + ACTIVE = 0 LEAVING = 1 FROZEN = 2 @@ -107,9 +109,7 @@ def is_update_safe() -> bool: @check_inited @check_user -def register_node(name, p2p_ip, - public_ip, port, domain_name): - +def register_node(name, p2p_ip, public_ip, port, domain_name): if not is_node_inited(): print(TEXTS['node']['not_inited']) return @@ -120,13 +120,9 @@ def register_node(name, p2p_ip, 'ip': p2p_ip, 'publicIP': public_ip, 'port': port, - 'domain_name': domain_name + 'domain_name': domain_name, } - status, payload = post_request( - blueprint=BLUEPRINT_NAME, - method='register', - json=json_data - ) + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='register', json=json_data) if status == 'ok': msg = TEXTS['node']['registered'] logger.info(msg) @@ -142,20 +138,16 @@ def init(env_filepath): env = get_node_env(env_filepath) if env is None: return - configure_firewall_rules() + + enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) + configure_firewall_rules(enable_monitoring=enable_monitoring) inited_ok = init_op(env_filepath, env) if not inited_ok: - error_exit( - 'Init operation failed', - exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR - ) + error_exit('Init operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) logger.info('Waiting for containers initialization') time.sleep(TM_INIT_TIMEOUT) if not is_base_containers_alive(): - error_exit( - 'Containers are not running', - exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR - ) + error_exit('Containers are not running', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) logger.info('Generating resource allocation file ...') update_resource_allocation(env['ENV_TYPE']) logger.info('Init procedure finished') @@ -173,49 +165,31 @@ def restore(backup_path, env_filepath, no_snapshot=False, config_only=False): logger.info('Adding BACKUP_RUN to env ...') env['BACKUP_RUN'] = 'True' # should be str - configure_firewall_rules() + 
enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) + configure_firewall_rules(enable_monitoring=enable_monitoring) restored_ok = restore_op(env, backup_path, config_only=config_only) if not restored_ok: - error_exit( - 'Restore operation failed', - exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR - ) + error_exit('Restore operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) time.sleep(RESTORE_SLEEP_TIMEOUT) logger.info('Generating resource allocation file ...') update_resource_allocation(env['ENV_TYPE']) print('Node is restored from backup') -def init_sync( - env_filepath: str, - archive: bool, - historic_state: bool, - snapshot_from: str -) -> None: - configure_firewall_rules() +def init_sync(env_filepath: str, archive: bool, historic_state: bool, snapshot_from: str) -> None: env = get_node_env(env_filepath, sync_node=True) if env is None: return - inited_ok = init_sync_op( - env_filepath, - env, - archive, - historic_state, - snapshot_from - ) + enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) + configure_firewall_rules(enable_monitoring=enable_monitoring) + inited_ok = init_sync_op(env_filepath, env, archive, historic_state, snapshot_from) if not inited_ok: - error_exit( - 'Init operation failed', - exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR - ) + error_exit('Init operation failed', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) logger.info('Waiting for containers initialization') time.sleep(TM_INIT_TIMEOUT) if not is_base_containers_alive(sync_node=True): - error_exit( - 'Containers are not running', - exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR - ) + error_exit('Containers are not running', exit_code=CLIExitCodes.OPERATION_EXECUTION_ERROR) logger.info('Sync node initialized successfully') @@ -226,8 +200,9 @@ def update_sync(env_filepath: str, unsafe_ok: bool = False) -> None: prev_version = get_meta_info()['version'] if (__version__ == 'test' or __version__.startswith('2.6')) and 
prev_version == '2.5.0': migrate_2_6() - configure_firewall_rules() env = get_node_env(env_filepath, sync_node=True) + enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) + configure_firewall_rules(enable_monitoring=enable_monitoring) update_ok = update_sync_op(env_filepath, env) if update_ok: logger.info('Waiting for containers initialization') @@ -242,36 +217,23 @@ def update_sync(env_filepath: str, unsafe_ok: bool = False) -> None: @check_inited @check_user -def repair_sync( - archive: bool, - historic_state: bool, - snapshot_from: str -) -> None: - +def repair_sync(archive: bool, historic_state: bool, snapshot_from: str) -> None: env_params = extract_env_params(INIT_ENV_FILEPATH, sync_node=True) schain_name = env_params['SCHAIN_NAME'] repair_sync_op( schain_name=schain_name, archive=archive, historic_state=historic_state, - snapshot_from=snapshot_from + snapshot_from=snapshot_from, ) logger.info('Schain was started from scratch') def get_node_env( - env_filepath, - inited_node=False, - sync_schains=None, - pull_config_for_schain=None, - sync_node=False + env_filepath, inited_node=False, sync_schains=None, pull_config_for_schain=None, sync_node=False ): if env_filepath is not None: - env_params = extract_env_params( - env_filepath, - sync_node=sync_node, - raise_for_status=True - ) + env_params = extract_env_params(env_filepath, sync_node=sync_node, raise_for_status=True) save_env_params(env_filepath) else: env_params = extract_env_params(INIT_ENV_FILEPATH, sync_node=sync_node) @@ -282,7 +244,7 @@ def get_node_env( 'SCHAINS_MNT_DIR': mnt_dir, 'FILESTORAGE_MAPPING': FILESTORAGE_MAPPING, 'SKALE_LIB_PATH': SKALE_STATE_DIR, - **env_params + **env_params, } if inited_node and not sync_node: flask_secret_key = get_flask_secret_key() @@ -305,13 +267,14 @@ def update(env_filepath: str, pull_config_for_schain: str, unsafe_ok: bool = Fal if (__version__ == 'test' or __version__.startswith('2.6')) and prev_version == '2.5.0': migrate_2_6() 
logger.info('Node update started') - configure_firewall_rules() env = get_node_env( env_filepath, inited_node=True, sync_schains=False, - pull_config_for_schain=pull_config_for_schain + pull_config_for_schain=pull_config_for_schain, ) + enable_monitoring = str_to_bool(env.get('MONITORING_CONTAINERS', 'False')) + configure_firewall_rules(enable_monitoring=enable_monitoring) update_ok = update_op(env_filepath, env) if update_ok: logger.info('Waiting for containers initialization') @@ -326,11 +289,7 @@ def update(env_filepath: str, pull_config_for_schain: str, unsafe_ok: bool = Fal def get_node_signature(validator_id): params = {'validator_id': validator_id} - status, payload = get_request( - blueprint=BLUEPRINT_NAME, - method='signature', - params=params - ) + status, payload = get_request(blueprint=BLUEPRINT_NAME, method='signature', params=params) if status == 'ok': return payload['signature'] else: @@ -343,7 +302,7 @@ def backup(path): def get_backup_filename(): - time = datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S") + time = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S') return f'{BACKUP_ARCHIVE_NAME}-{time}.tar.gz' @@ -390,20 +349,13 @@ def create_backup_archive(backup_filepath): print('Creating backup archive...') cli_log_path = CLI_LOG_DATA_PATH container_log_path = LOG_PATH - pack_dir( - SKALE_DIR, - backup_filepath, - exclude=(cli_log_path, container_log_path) - ) + pack_dir(SKALE_DIR, backup_filepath, exclude=(cli_log_path, container_log_path)) print(f'Backup archive succesfully created {backup_filepath}') def set_maintenance_mode_on(): print('Setting maintenance mode on...') - status, payload = post_request( - blueprint=BLUEPRINT_NAME, - method='maintenance-on' - ) + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='maintenance-on') if status == 'ok': msg = TEXTS['node']['maintenance_on'] logger.info(msg) @@ -416,10 +368,7 @@ def set_maintenance_mode_on(): def set_maintenance_mode_off(): print('Setting maintenance mode 
off...') - status, payload = post_request( - blueprint=BLUEPRINT_NAME, - method='maintenance-off' - ) + status, payload = post_request(blueprint=BLUEPRINT_NAME, method='maintenance-off') if status == 'ok': msg = TEXTS['node']['maintenance_off'] logger.info(msg) @@ -459,18 +408,13 @@ def turn_on(maintenance_off, sync_schains, env_file): def is_base_containers_alive(sync_node: bool = False): dclient = docker.from_env() containers = dclient.containers.list() - skale_containers = list(filter( - lambda c: c.name.startswith('skale_'), containers - )) + skale_containers = list(filter(lambda c: c.name.startswith('skale_'), containers)) containers_amount = SYNC_BASE_CONTAINERS_AMOUNT if sync_node else BASE_CONTAINERS_AMOUNT return len(skale_containers) >= containers_amount def get_node_info_plain(): - status, payload = get_request( - blueprint=BLUEPRINT_NAME, - method='info' - ) + status, payload = get_request(blueprint=BLUEPRINT_NAME, method='info') if status == 'ok': return payload['node_info'] else: @@ -484,10 +428,7 @@ def get_node_info(format): elif node_info['status'] == NodeStatuses.NOT_CREATED.value: print(TEXTS['service']['node_not_registered']) else: - print_node_info( - node_info, - get_node_status(int(node_info['status'])) - ) + print_node_info(node_info, get_node_status(int(node_info['status']))) def get_node_status(status): @@ -499,11 +440,7 @@ def get_node_status(status): def set_domain_name(domain_name): print(f'Setting new domain name: {domain_name}') status, payload = post_request( - blueprint=BLUEPRINT_NAME, - method='set-domain-name', - json={ - 'domain_name': domain_name - } + blueprint=BLUEPRINT_NAME, method='set-domain-name', json={'domain_name': domain_name} ) if status == 'ok': msg = TEXTS['node']['domain_name_changed'] @@ -516,7 +453,7 @@ def set_domain_name(domain_name): def run_checks( network: str = 'mainnet', container_config_path: str = CONTAINER_CONFIG_PATH, - disk: Optional[str] = None + disk: Optional[str] = None, ) -> None: if not 
is_node_inited(): print(TEXTS['node']['not_inited']) @@ -525,11 +462,7 @@ def run_checks( if disk is None: env = get_env_config() disk = env['DISK_MOUNTPOINT'] - failed_checks = run_host_checks( - disk, - network, - container_config_path - ) + failed_checks = run_host_checks(disk, network, container_config_path) if not failed_checks: print('Requirements checking succesfully finished!') else: @@ -537,7 +470,7 @@ def run_checks( print_failed_requirements_checks(failed_checks) -def configure_firewall_rules() -> None: +def configure_firewall_rules(enable_monitoring: bool = False) -> None: print('Configuring firewall ...') - configure_nftables() + configure_nftables(enable_monitoring=enable_monitoring) print('Done') diff --git a/node_cli/operations/base.py b/node_cli/operations/base.py index 2a27807b..2a2867a9 100644 --- a/node_cli/operations/base.py +++ b/node_cli/operations/base.py @@ -140,7 +140,7 @@ def update(env_filepath: str, env: Dict) -> None: distro.id(), distro.version() ) - update_images(env.get('CONTAINER_CONFIGS_DIR') != '') + update_images(env=env) compose_up(env) return True @@ -175,7 +175,7 @@ def init(env_filepath: str, env: dict) -> bool: distro.version() ) update_resource_allocation(env_type=env['ENV_TYPE']) - update_images(env.get('CONTAINER_CONFIGS_DIR') != '') + update_images(env=env) compose_up(env) return True @@ -231,7 +231,7 @@ def init_sync( if snapshot_from: update_node_cli_schain_status(schain_name, snapshot_from=snapshot_from) - update_images(env.get('CONTAINER_CONFIGS_DIR') != '', sync_node=True) + update_images(env=env, sync_node=True) compose_up(env, sync_node=True) return True @@ -273,7 +273,7 @@ def update_sync(env_filepath: str, env: Dict) -> bool: distro.id(), distro.version() ) - update_images(env.get('CONTAINER_CONFIGS_DIR') != '', sync_node=True) + update_images(env=env, sync_node=True) compose_up(env, sync_node=True) return True diff --git a/node_cli/operations/skale_node.py b/node_cli/operations/skale_node.py index 
b3745070..d91e4765 100644 --- a/node_cli/operations/skale_node.py +++ b/node_cli/operations/skale_node.py @@ -35,11 +35,12 @@ logger = logging.getLogger(__name__) -def update_images(local: bool = False, sync_node: bool = False) -> None: +def update_images(env: dict, sync_node: bool = False) -> None: + local = env.get('CONTAINER_CONFIGS_DIR') != '' if local: - compose_build(sync_node=sync_node) + compose_build(env=env, sync_node=sync_node) else: - compose_pull(sync_node=sync_node) + compose_pull(env=env, sync_node=sync_node) def download_skale_node(stream: Optional[str], src: Optional[str]) -> None: diff --git a/node_cli/utils/docker_utils.py b/node_cli/utils/docker_utils.py index f625e282..7a4957db 100644 --- a/node_cli/utils/docker_utils.py +++ b/node_cli/utils/docker_utils.py @@ -33,7 +33,6 @@ SYNC_COMPOSE_PATH, REMOVED_CONTAINERS_FOLDER_PATH, SGX_CERTIFICATES_DIR_NAME, - SKALE_DIR, NGINX_CONTAINER_NAME ) @@ -247,7 +246,7 @@ def compose_rm(env={}, sync_node: bool = False): compose_path = get_compose_path(sync_node) run_cmd( cmd=( - 'docker compose', + 'docker', 'compose', '-f', compose_path, 'down', '-t', str(COMPOSE_SHUTDOWN_TIMEOUT), @@ -257,34 +256,30 @@ def compose_rm(env={}, sync_node: bool = False): logger.info('Compose containers removed') -def compose_pull(sync_node: bool = False): +def compose_pull(env: dict, sync_node: bool = False): logger.info('Pulling compose containers') compose_path = get_compose_path(sync_node) run_cmd( - cmd=('docker compose', '-f', compose_path, 'pull'), - env={ - 'SKALE_DIR': SKALE_DIR - } + cmd=('docker', 'compose', '-f', compose_path, 'pull'), + env=env ) -def compose_build(sync_node: bool = False): +def compose_build(env: dict, sync_node: bool = False): logger.info('Building compose containers') compose_path = get_compose_path(sync_node) run_cmd( - cmd=('docker compose', '-f', compose_path, 'build'), - env={ - 'SKALE_DIR': SKALE_DIR - } + cmd=('docker', 'compose', '-f', compose_path, 'build'), + env=env ) def 
get_up_compose_cmd(services): - return ('docker compose', '-f', COMPOSE_PATH, 'up', '-d', *services) + return ('docker', 'compose', '-f', COMPOSE_PATH, 'up', '-d', *services) def get_up_compose_sync_cmd(): - return ('docker compose', '-f', SYNC_COMPOSE_PATH, 'up', '-d') + return ('docker', 'compose', '-f', SYNC_COMPOSE_PATH, 'up', '-d') def get_compose_path(sync_node: bool) -> str: @@ -302,6 +297,7 @@ def compose_up(env, sync_node=False): if 'SGX_CERTIFICATES_DIR_NAME' not in env: env['SGX_CERTIFICATES_DIR_NAME'] = SGX_CERTIFICATES_DIR_NAME + logger.debug('Launching containers with env %s', env) run_cmd(cmd=get_up_compose_cmd(BASE_COMPOSE_SERVICES), env=env) if str_to_bool(env.get('MONITORING_CONTAINERS', 'False')): logger.info('Running monitoring containers') diff --git a/scripts/run_nftables_test.sh b/scripts/run_nftables_test.sh index 98827e3c..e4bf8563 100755 --- a/scripts/run_nftables_test.sh +++ b/scripts/run_nftables_test.sh @@ -1,10 +1,6 @@ #!/usr/bin/env bash set -ea -# DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -# PROJECT_DIR=$(dirname $DIR) -# export DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" - docker rm -f ncli-tester || true docker build . -t ncli-tester docker run \