diff --git a/Dockerfile b/Dockerfile
index 2f6efa585..a7d00e9b4 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM python:3.8-buster
+FROM python:3.9-buster
RUN apt-get update && apt-get install -y wget git libxslt-dev iptables kmod swig3.0
RUN ln -s /usr/bin/swig3.0 /usr/bin/swig
diff --git a/VERSION b/VERSION
index 437459cd9..73462a5a1 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.5.0
+2.5.1
diff --git a/admin.py b/admin.py
index edfe26de8..0a70ee312 100644
--- a/admin.py
+++ b/admin.py
@@ -29,7 +29,7 @@
from core.updates import soft_updates
from core.filebeat import update_filebeat_service
-from tools.configs import BACKUP_RUN, INIT_LOCK_PATH
+from tools.configs import BACKUP_RUN, INIT_LOCK_PATH, PULL_CONFIG_FOR_SCHAIN
from tools.configs.web3 import (
ENDPOINT, ABI_FILEPATH, STATE_FILEPATH)
from tools.configs.ima import MAINNET_IMA_ABI_FILEPATH
@@ -38,7 +38,13 @@
from tools.sgx_utils import generate_sgx_key
from tools.wallet_utils import init_wallet
-from web.models.schain import create_tables, set_schains_first_run, set_schains_monitor_id
+from web.models.schain import (
+ create_tables,
+ set_schains_backup_run,
+ set_schains_first_run,
+ set_schains_monitor_id,
+ set_schains_sync_config_run
+)
from web.migrations import migrate
@@ -91,6 +97,10 @@ def init():
migrate()
set_schains_first_run()
set_schains_monitor_id()
+ if BACKUP_RUN:
+ set_schains_backup_run()
+ if PULL_CONFIG_FOR_SCHAIN:
+ set_schains_sync_config_run(PULL_CONFIG_FOR_SCHAIN)
cleanup_notification_state()
diff --git a/core/node.py b/core/node.py
index 88ed21f0b..62612e238 100644
--- a/core/node.py
+++ b/core/node.py
@@ -44,7 +44,6 @@
from core.filebeat import update_filebeat_service
from tools.configs import CHECK_REPORT_PATH, META_FILEPATH, WATCHDOG_PORT
-from tools.configs.web3 import NODE_REGISTER_CONFIRMATION_BLOCKS
from tools.helper import read_json
from tools.str_formatters import arguments_list_string
from tools.wallet_utils import check_required_balance
@@ -151,8 +150,7 @@ def create_node_on_contracts(self, ip, public_ip, port, name, domain_name,
gas_limit=gas_limit,
gas_price=gas_price,
skip_dry_run=skip_dry_run,
- wait_for=True,
- confirmation_blocks=NODE_REGISTER_CONFIRMATION_BLOCKS
+ wait_for=True
)
except TransactionFailedError:
logger.exception('Node creation failed')
diff --git a/core/schains/checks.py b/core/schains/checks.py
index 25d70cedb..cd331d591 100644
--- a/core/schains/checks.py
+++ b/core/schains/checks.py
@@ -18,34 +18,37 @@
# along with this program. If not, see .
import os
-import time
import logging
+import time
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, Optional
-from core.schains.config.directory import (
- get_schain_config,
- schain_config_dir,
- schain_config_filepath,
- get_schain_check_filepath
-)
+from core.schains.config.directory import get_schain_check_filepath
+from core.schains.config.file_manager import ConfigFileManager
from core.schains.config.helper import (
get_base_port_from_config,
get_node_ips_from_config,
get_own_ip_from_config,
- get_local_schain_http_endpoint
+ get_local_schain_http_endpoint_from_config
+)
+from core.schains.config.main import (
+ get_skaled_config_rotations_ids,
+ get_upstream_config_rotation_ids
)
-from core.schains.config.main import schain_config_version_match
from core.schains.dkg.utils import get_secret_key_share_filepath
from core.schains.firewall.types import IRuleController
from core.schains.ima import get_migration_ts as get_ima_migration_ts
from core.schains.process_manager_helper import is_monitor_process_alive
from core.schains.rpc import (
- check_endpoint_alive, check_endpoint_blocks, get_endpoint_alive_check_timeout
+ check_endpoint_alive,
+ check_endpoint_blocks,
+ get_endpoint_alive_check_timeout
)
+from core.schains.external_config import ExternalConfig, ExternalState
from core.schains.runner import get_container_name, get_image_name, is_new_image_pulled
from core.schains.skaled_exit_codes import SkaledExitCodes
from tools.configs.containers import IMA_CONTAINER, SCHAIN_CONTAINER
-from tools.configs.ima import DISABLE_IMA
from tools.docker_utils import DockerUtils
from tools.helper import write_json
from tools.str_formatters import arguments_list_string
@@ -69,38 +72,95 @@
'ima_container'
]
+TG_ALLOWED_CHECKS = [
+ 'volume',
+ 'firewall_rules',
+ 'skaled_container',
+ 'exit_code_ok',
+ 'rpc',
+ 'blocks',
+ 'process',
+ 'ima_container'
+]
+
class CheckRes:
def __init__(self, status: bool, data: dict = None):
self.status = status
self.data = data if data else {}
+ def __bool__(self) -> bool:
+ return self.status
-class SChainChecks:
- def __init__(
- self,
- schain_name: str,
- node_id: int,
- schain_record: SChainRecord,
- rule_controller: IRuleController,
- rotation_id: int = 0,
- *,
- ima_linked: bool = True,
- dutils: DockerUtils = None
- ):
+ def __str__(self) -> str:
+ return f'CheckRes<{self.status}>'
+
+
+class IChecks(ABC):
+ @abstractmethod
+ def get_name(self) -> str:
+ pass
+
+ def get_all(self,
+ log: bool = True,
+ save: bool = False,
+ needed: Optional[List[str]] = None) -> Dict:
+ if needed:
+ names = needed
+ else:
+ names = self.get_check_names()
+
+ checks_status = {}
+ for name in names:
+ if hasattr(self, name):
+ checks_status[name] = getattr(self, name).status
+ if log:
+ log_checks_dict(self.get_name(), checks_status)
+ if save:
+ save_checks_dict(self.get_name(), checks_status)
+ return checks_status
+
+ def is_healthy(self) -> bool:
+ checks = self.get_all()
+ return False not in checks.values()
+
+ @classmethod
+ def get_check_names(cls):
+ return list(filter(
+ lambda c: not c.startswith('_') and isinstance(
+ getattr(cls, c), property),
+ dir(cls)
+ ))
+
+
+class ConfigChecks(IChecks):
+ def __init__(self,
+ schain_name: str,
+ node_id: int,
+ schain_record: SChainRecord,
+ rotation_id: int,
+ stream_version: str,
+ estate: ExternalState,
+ econfig: Optional[ExternalConfig] = None
+ ) -> None:
self.name = schain_name
self.node_id = node_id
self.schain_record = schain_record
self.rotation_id = rotation_id
- self.dutils = dutils or DockerUtils()
- self.container_name = get_container_name(SCHAIN_CONTAINER, self.name)
- self.ima_linked = ima_linked
- self.rc = rule_controller
+ self.stream_version = stream_version
+ self.estate = estate
+ self.econfig = econfig or ExternalConfig(schain_name)
+ self.cfm: ConfigFileManager = ConfigFileManager(
+ schain_name=schain_name
+ )
+
+ def get_name(self) -> str:
+ return self.name
@property
def config_dir(self) -> CheckRes:
"""Checks that sChain config directory exists"""
- dir_path = schain_config_dir(self.name)
+ dir_path = self.cfm.dirname
return CheckRes(os.path.isdir(dir_path))
@property
@@ -113,15 +173,78 @@ def dkg(self) -> CheckRes:
return CheckRes(os.path.isfile(secret_key_share_filepath))
@property
- def config(self) -> CheckRes:
- """Checks that sChain config file exists"""
- config_filepath = schain_config_filepath(self.name)
- if not os.path.isfile(config_filepath):
- return CheckRes(False)
+ def upstream_config(self) -> CheckRes:
+ """Checks that config exists for rotation id and stream"""
+ exists = self.cfm.upstream_exist_for_rotation_id(self.rotation_id)
+
+ logger.debug('Upstream configs status for %s: %s', self.name, exists)
return CheckRes(
- schain_config_version_match(self.name, self.schain_record)
+ exists and
+ self.schain_record.config_version == self.stream_version and
+ not self.schain_record.sync_config_run
)
+ @property
+ def external_state(self) -> CheckRes:
+ actual_state = self.econfig.get()
+ logger.debug(
+ 'Checking external config. Current %s. Saved %s',
+ self.estate, actual_state
+ )
+ return CheckRes(self.econfig.synced(self.estate))
+
+
+class SkaledChecks(IChecks):
+ def __init__(
+ self,
+ schain_name: str,
+ schain_record: SChainRecord,
+ rule_controller: IRuleController,
+ *,
+ econfig: Optional[ExternalConfig] = None,
+ dutils: DockerUtils = None
+ ):
+ self.name = schain_name
+ self.schain_record = schain_record
+ self.dutils = dutils or DockerUtils()
+ self.container_name = get_container_name(SCHAIN_CONTAINER, self.name)
+ self.econfig = econfig or ExternalConfig(name=schain_name)
+ self.rc = rule_controller
+ self.cfm: ConfigFileManager = ConfigFileManager(
+ schain_name=schain_name
+ )
+
+ def get_name(self) -> str:
+ return self.name
+
+ @property
+ def upstream_exists(self) -> CheckRes:
+ return CheckRes(self.cfm.upstream_config_exists())
+
+ @property
+ def rotation_id_updated(self) -> CheckRes:
+ if not self.config:
+ return CheckRes(False)
+ upstream_rotations = get_upstream_config_rotation_ids(self.cfm)
+ config_rotations = get_skaled_config_rotations_ids(self.cfm)
+ logger.debug(
+ 'Comparing rotation_ids. Upstream: %s. Config: %s',
+ upstream_rotations,
+ config_rotations
+ )
+ return CheckRes(upstream_rotations == config_rotations)
+
+ @property
+ def config_updated(self) -> CheckRes:
+ if not self.config:
+ return CheckRes(False)
+ return CheckRes(self.cfm.skaled_config_synced_with_upstream())
+
+ @property
+ def config(self) -> CheckRes:
+ """ Checks that sChain config file exists """
+ return CheckRes(self.cfm.skaled_config_exists())
+
@property
def volume(self) -> CheckRes:
"""Checks that sChain volume exists"""
@@ -130,17 +253,19 @@ def volume(self) -> CheckRes:
@property
def firewall_rules(self) -> CheckRes:
"""Checks that firewall rules are set correctly"""
- if self.config.status:
- conf = get_schain_config(self.name)
+ if self.config:
+ conf = self.cfm.skaled_config
base_port = get_base_port_from_config(conf)
node_ips = get_node_ips_from_config(conf)
own_ip = get_own_ip_from_config(conf)
+ ranges = self.econfig.ranges
self.rc.configure(
base_port=base_port,
own_ip=own_ip,
- node_ips=node_ips
+ node_ips=node_ips,
+ sync_ip_ranges=ranges
)
- logger.info(f'Rule controller {self.rc.expected_rules()}')
+ logger.debug(f'Rule controller {self.rc.expected_rules()}')
return CheckRes(self.rc.is_rules_synced())
return CheckRes(False)
@@ -161,8 +286,11 @@ def exit_code_ok(self) -> CheckRes:
@property
def ima_container(self) -> CheckRes:
"""Checks that IMA container is running"""
+ if not self.econfig.ima_linked:
+ return CheckRes(True)
container_name = get_container_name(IMA_CONTAINER, self.name)
- new_image_pulled = is_new_image_pulled(type=IMA_CONTAINER, dutils=self.dutils)
+ new_image_pulled = is_new_image_pulled(
+ type=IMA_CONTAINER, dutils=self.dutils)
migration_ts = get_ima_migration_ts(self.name)
new = time.time() > migration_ts
@@ -191,8 +319,9 @@ def ima_container(self) -> CheckRes:
def rpc(self) -> CheckRes:
"""Checks that local skaled RPC is accessible"""
res = False
- if self.config.status:
- http_endpoint = get_local_schain_http_endpoint(self.name)
+ if self.config:
+ config = self.cfm.skaled_config
+ http_endpoint = get_local_schain_http_endpoint_from_config(config)
timeout = get_endpoint_alive_check_timeout(
self.schain_record.failed_rpc_count
)
@@ -202,8 +331,9 @@ def rpc(self) -> CheckRes:
@property
def blocks(self) -> CheckRes:
"""Checks that local skaled is mining blocks"""
- if self.config.status:
- http_endpoint = get_local_schain_http_endpoint(self.name)
+ if self.config:
+ config = self.cfm.skaled_config
+ http_endpoint = get_local_schain_http_endpoint_from_config(config)
return CheckRes(check_endpoint_blocks(http_endpoint))
return CheckRes(False)
@@ -212,31 +342,87 @@ def process(self) -> CheckRes:
"""Checks that sChain monitor process is running"""
return CheckRes(is_monitor_process_alive(self.schain_record.monitor_id))
- def get_all(self, log=True, save=False, checks_filter=None):
- if not checks_filter:
- checks_filter = API_ALLOWED_CHECKS
- checks_dict = {}
- for check in checks_filter:
- if check == 'ima_container' and (DISABLE_IMA or not self.ima_linked):
- logger.info(f'Check {check} will be skipped - IMA is not linked')
- elif check not in API_ALLOWED_CHECKS:
- logger.warning(f'Check {check} is not allowed or does not exist')
- else:
- checks_dict[check] = getattr(self, check).status
+ @property
+ def exit_zero(self) -> CheckRes:
+ """Check that sChain container exited with zero code"""
+ if self.dutils.is_container_running(self.container_name):
+ return CheckRes(False)
+ exit_code = self.dutils.container_exit_code(self.container_name)
+ return CheckRes(exit_code == SkaledExitCodes.EC_SUCCESS)
+
+
+class SChainChecks(IChecks):
+ def __init__(
+ self,
+ schain_name: str,
+ node_id: int,
+ schain_record: SChainRecord,
+ rule_controller: IRuleController,
+ stream_version: str,
+ estate: ExternalState,
+ rotation_id: int = 0,
+ *,
+ econfig: Optional[ExternalConfig] = None,
+ dutils: DockerUtils = None
+ ):
+ self._subjects = [
+ ConfigChecks(
+ schain_name=schain_name,
+ node_id=node_id,
+ schain_record=schain_record,
+ rotation_id=rotation_id,
+ stream_version=stream_version,
+ estate=estate,
+ econfig=econfig
+ ),
+ SkaledChecks(
+ schain_name=schain_name,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ econfig=econfig,
+ dutils=dutils
+ )
+ ]
+
+ def __getattr__(self, attr: str) -> Any:
+ for subj in self._subjects:
+ if attr in dir(subj):
+ return getattr(subj, attr)
+ raise AttributeError(f'No such attribute {attr}')
+
+ def get_name(self) -> str:
+ return self.name
+
+ def get_all(self, log: bool = True, save: bool = False, needed: Optional[List[str]] = None):
+ needed = needed or API_ALLOWED_CHECKS
+
+ plain_checks = {}
+ for subj in self._subjects:
+ subj_checks = subj.get_all(
+ log=False,
+ save=False,
+ needed=needed
+ )
+ plain_checks.update(subj_checks)
+ if not self.estate.ima_linked:
+ if 'ima_container' in plain_checks:
+ del plain_checks['ima_container']
+
if log:
- log_checks_dict(self.name, checks_dict)
+ log_checks_dict(self.get_name(), plain_checks)
if save:
- save_checks_dict(self.name, checks_dict)
- return checks_dict
+ save_checks_dict(self.get_name(), plain_checks)
+ return plain_checks
- def is_healthy(self):
- checks = self.get_all()
- return False not in checks.values()
+
+def get_api_checks_status(status: Dict, allowed: List = API_ALLOWED_CHECKS) -> Dict:
+ return dict(filter(lambda r: r[0] in allowed, status.items()))
def save_checks_dict(schain_name, checks_dict):
schain_check_path = get_schain_check_filepath(schain_name)
- logger.info(f'Saving checks for the chain {schain_name}: {schain_check_path}')
+ logger.info(
+ f'Saving checks for the chain {schain_name}: {schain_check_path}')
try:
write_json(schain_check_path, {
'time': time.time(),
diff --git a/core/schains/cleaner.py b/core/schains/cleaner.py
index 9a67e27d7..2bf5c3a07 100644
--- a/core/schains/cleaner.py
+++ b/core/schains/cleaner.py
@@ -24,18 +24,20 @@
from sgx import SgxClient
+from core.node import get_skale_node_version
from core.schains.checks import SChainChecks
+from core.schains.config.file_manager import ConfigFileManager
from core.schains.config.directory import schain_config_dir
from core.schains.dkg.utils import get_secret_key_share_filepath
from core.schains.firewall.utils import get_default_rule_controller
from core.schains.config.helper import (
get_base_port_from_config,
get_node_ips_from_config,
- get_own_ip_from_config,
- get_schain_config
+ get_own_ip_from_config
)
from core.schains.process_manager_helper import terminate_schain_process
from core.schains.runner import get_container_name, is_exited
+from core.schains.external_config import ExternalConfig
from core.schains.types import ContainerType
from core.schains.firewall.utils import get_sync_agent_ranges
@@ -44,7 +46,6 @@
from tools.configs.containers import (
SCHAIN_CONTAINER, IMA_CONTAINER, SCHAIN_STOP_TIMEOUT
)
-from tools.configs.ima import DISABLE_IMA
from tools.docker_utils import DockerUtils
from tools.helper import merged_unique, read_json, is_node_part_of_chain
from tools.sgx_utils import SGX_SERVER_URL
@@ -58,7 +59,8 @@
def run_cleaner(skale, node_config):
- process = Process(target=monitor, args=(skale, node_config))
+ process = Process(name='cleaner', target=monitor,
+ args=(skale, node_config))
process.start()
logger.info('Cleaner process started')
process.join(JOIN_TIMEOUT)
@@ -202,10 +204,27 @@ def remove_schain(skale, node_id, schain_name, msg, dutils=None) -> None:
terminate_schain_process(schain_record)
delete_bls_keys(skale, schain_name)
sync_agent_ranges = get_sync_agent_ranges(skale)
- cleanup_schain(node_id, schain_name, sync_agent_ranges, dutils=dutils)
+ rotation_data = skale.node_rotation.get_rotation(schain_name)
+ rotation_id = rotation_data['rotation_id']
+ estate = ExternalConfig(name=schain_name).get()
+ cleanup_schain(
+ node_id,
+ schain_name,
+ sync_agent_ranges,
+ rotation_id=rotation_id,
+ estate=estate,
+ dutils=dutils
+ )
-def cleanup_schain(node_id, schain_name, sync_agent_ranges, dutils=None) -> None:
+def cleanup_schain(
+ node_id,
+ schain_name,
+ sync_agent_ranges,
+ rotation_id,
+ estate,
+ dutils=None
+) -> None:
dutils = dutils or DockerUtils()
schain_record = upsert_schain_record(schain_name)
@@ -213,35 +232,48 @@ def cleanup_schain(node_id, schain_name, sync_agent_ranges, dutils=None) -> None
name=schain_name,
sync_agent_ranges=sync_agent_ranges
)
+ stream_version = get_skale_node_version()
checks = SChainChecks(
schain_name,
node_id,
rule_controller=rc,
- schain_record=schain_record
+ stream_version=stream_version,
+ schain_record=schain_record,
+ rotation_id=rotation_id,
+ estate=estate
)
- if checks.skaled_container.status or is_exited(
+ status = checks.get_all()
+ if status['skaled_container'] or is_exited(
schain_name,
container_type=ContainerType.schain,
dutils=dutils
):
remove_schain_container(schain_name, dutils=dutils)
- if checks.volume.status:
+ if status['volume']:
remove_schain_volume(schain_name, dutils=dutils)
- if checks.firewall_rules.status:
- conf = get_schain_config(schain_name)
- base_port = get_base_port_from_config(conf)
- own_ip = get_own_ip_from_config(conf)
- node_ips = get_node_ips_from_config(conf)
- rc.configure(base_port=base_port, own_ip=own_ip, node_ips=node_ips)
- rc.cleanup()
- if not DISABLE_IMA:
- if checks.ima_container.status or is_exited(
+ if status['firewall_rules']:
+ conf = ConfigFileManager(schain_name).skaled_config
+ base_port = get_base_port_from_config(conf)
+ own_ip = get_own_ip_from_config(conf)
+ node_ips = get_node_ips_from_config(conf)
+ ranges = []
+ if estate is not None:
+ ranges = estate.ranges
+ rc.configure(
+ base_port=base_port,
+ own_ip=own_ip,
+ node_ips=node_ips,
+ sync_ip_ranges=ranges
+ )
+ rc.cleanup()
+ if estate is not None and estate.ima_linked:
+ if status.get('ima_container', False) or is_exited(
schain_name,
container_type=ContainerType.ima,
dutils=dutils
):
remove_ima_container(schain_name, dutils=dutils)
- if checks.config_dir.status:
+ if status['config_dir']:
remove_config_dir(schain_name)
mark_schain_deleted(schain_name)
diff --git a/core/schains/cmd.py b/core/schains/cmd.py
index 59e9e06e7..25e875285 100644
--- a/core/schains/cmd.py
+++ b/core/schains/cmd.py
@@ -17,19 +17,21 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-from core.schains.config.helper import get_schain_ports
+from core.schains.config.file_manager import ConfigFileManager
+from core.schains.config.helper import get_schain_ports_from_config
+from core.schains.config.main import get_skaled_container_config_path
from core.schains.config.static_params import get_static_schain_cmd
from core.schains.ssl import get_ssl_filepath
-from core.schains.config.directory import schain_config_filepath
-from tools.configs.containers import DATA_DIR_CONTAINER_PATH, SHARED_SPACE_CONTAINER_PATH
+
from tools.configs import SGX_SERVER_URL
+from tools.configs.containers import DATA_DIR_CONTAINER_PATH, SHARED_SPACE_CONTAINER_PATH
from tools.configs.ima import IMA_ENDPOINT
def get_schain_container_cmd(
schain_name: str,
- public_key: str = None,
start_ts: int = None,
+ download_snapshot: bool = False,
enable_ssl: bool = True,
snapshot_from: str = ''
) -> str:
@@ -37,7 +39,7 @@ def get_schain_container_cmd(
opts = get_schain_container_base_opts(schain_name, enable_ssl=enable_ssl)
if snapshot_from:
opts.extend(['--no-snapshot-majority', snapshot_from])
- if public_key:
+ if download_snapshot:
sync_opts = get_schain_container_sync_opts(start_ts)
opts.extend(sync_opts)
return ' '.join(opts)
@@ -54,9 +56,10 @@ def get_schain_container_sync_opts(start_ts: int = None) -> list:
def get_schain_container_base_opts(schain_name: str,
enable_ssl: bool = True) -> list:
- config_filepath = schain_config_filepath(schain_name, in_schain_container=True)
+ config_filepath = get_skaled_container_config_path(schain_name)
ssl_key, ssl_cert = get_ssl_filepath()
- ports = get_schain_ports(schain_name)
+ config = ConfigFileManager(schain_name=schain_name).skaled_config
+ ports = get_schain_ports_from_config(config)
static_schain_cmd = get_static_schain_cmd()
cmd = [
f'--config {config_filepath}',
diff --git a/core/schains/config/__init__.py b/core/schains/config/__init__.py
index accf53615..31297ca70 100644
--- a/core/schains/config/__init__.py
+++ b/core/schains/config/__init__.py
@@ -17,5 +17,4 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-from .main import init_schain_config # noqa
from .directory import init_schain_config_dir # noqa
diff --git a/core/schains/config/directory.py b/core/schains/config/directory.py
index fc5e209eb..8d2c7a66d 100644
--- a/core/schains/config/directory.py
+++ b/core/schains/config/directory.py
@@ -17,25 +17,24 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import os
import json
import logging
+import os
from pathlib import Path
+from typing import List
-from tools.configs import SCHAIN_CONFIG_DIR_SKALED
from tools.configs.schains import (
- SCHAINS_DIR_PATH, SCHAINS_DIR_PATH_HOST, BASE_SCHAIN_CONFIG_FILEPATH, SKALED_STATUS_FILENAME,
- SCHAIN_SCHECKS_FILENAME
+ BASE_SCHAIN_CONFIG_FILEPATH,
+ SCHAINS_DIR_PATH,
+ SCHAINS_DIR_PATH_HOST,
+ SCHAIN_SCHECKS_FILENAME,
+ SKALED_STATUS_FILENAME
)
logger = logging.getLogger(__name__)
-def _config_filename(name: str) -> str:
- return f'schain_{name}.json'
-
-
def schain_config_dir(name: str) -> str:
"""Get sChain config directory path in container"""
return os.path.join(SCHAINS_DIR_PATH, name)
@@ -48,44 +47,33 @@ def schain_config_dir_host(name: str) -> str:
def init_schain_config_dir(name: str) -> str:
"""Init empty sChain config directory"""
- logger.info(f'Initializing config directory for sChain: {name}')
+ logger.debug(f'Initializing config directory for sChain: {name}')
data_dir_path = schain_config_dir(name)
path = Path(data_dir_path)
os.makedirs(path, exist_ok=True)
-
-
-def schain_config_filepath(name: str, in_schain_container=False) -> str:
- schain_dir_path = SCHAIN_CONFIG_DIR_SKALED if in_schain_container else schain_config_dir(name)
- return os.path.join(schain_dir_path, _config_filename(name))
+ return data_dir_path
def skaled_status_filepath(name: str) -> str:
return os.path.join(schain_config_dir(name), SKALED_STATUS_FILENAME)
-def get_tmp_schain_config_filepath(schain_name):
- schain_dir_path = schain_config_dir(schain_name)
- return os.path.join(schain_dir_path,
- f'tmp_schain_{schain_name}.json')
-
-
def get_schain_check_filepath(schain_name):
schain_dir_path = schain_config_dir(schain_name)
return os.path.join(schain_dir_path, SCHAIN_SCHECKS_FILENAME)
-def get_schain_config(schain_name):
- config_filepath = schain_config_filepath(schain_name)
- with open(config_filepath) as f:
- schain_config = json.load(f)
- return schain_config
-
-
-def schain_config_exists(schain_name):
- config_filepath = schain_config_filepath(schain_name)
- return os.path.isfile(config_filepath)
-
-
def read_base_config():
json_data = open(BASE_SCHAIN_CONFIG_FILEPATH).read()
return json.loads(json_data)
+
+
+def get_files_with_prefix(config_dir: str, prefix: str) -> List[str]:
+ prefix_files = []
+ if os.path.isdir(config_dir):
+ prefix_files = [
+ os.path.join(config_dir, fname)
+ for fname in os.listdir(config_dir)
+ if fname.startswith(prefix)
+ ]
+ return sorted(prefix_files)
diff --git a/core/schains/config/file_manager.py b/core/schains/config/file_manager.py
new file mode 100644
index 000000000..eac7dd99f
--- /dev/null
+++ b/core/schains/config/file_manager.py
@@ -0,0 +1,209 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+import logging
+import os
+import re
+import shutil
+import time
+import threading
+from abc import ABCMeta, abstractmethod
+from pathlib import Path
+from typing import ClassVar, Dict, List, Optional, TypeVar
+
+from core.schains.config.directory import get_files_with_prefix
+from tools.configs.schains import SCHAINS_DIR_PATH
+from tools.helper import read_json, write_json
+
+IConfigFilenameType = TypeVar('IConfigFilenameType', bound='IConfigFilename')
+
+logger = logging.getLogger(__name__)
+
+
+class IConfigFilename(metaclass=ABCMeta):
+ @property
+ @abstractmethod
+ def filename(self) -> str:
+ pass
+
+ def abspath(self, base_path: str) -> str:
+ return os.path.join(base_path, self.filename)
+
+ @classmethod
+ @abstractmethod
+ def from_filename(cls, filename: str) -> IConfigFilenameType:
+ pass
+
+
+class UpstreamConfigFilename(IConfigFilename):
+ def __init__(self, name: str, rotation_id: int, ts: int) -> None:
+ self.name = name
+ self.rotation_id = rotation_id
+ self.ts = ts
+
+ @property
+ def filename(self) -> str:
+ return f'schain_{self.name}_{self.rotation_id}_{self.ts}.json'
+
+ def __eq__(self, other) -> bool:
+ return self.name == other.name and \
+ self.rotation_id == other.rotation_id and \
+ self.ts == other.ts
+
+ def __lt__(self, other) -> bool:
+ if self.name != other.name:
+ return self.name < other.name
+ elif self.rotation_id != other.rotation_id:
+ return self.rotation_id < other.rotation_id
+ else:
+ return self.ts < other.ts
+
+ @classmethod
+ def from_filename(cls, filename: str):
+ stem = Path(filename).stem
+ ts_start = stem.rfind('_', 0, len(stem))
+ ts: int = int(stem[ts_start + 1:])
+ rid_start = stem.rfind('_', 0, ts_start)
+ rotation_id: int = int(stem[rid_start + 1: ts_start])
+ name = stem[:rid_start].replace('schain_', '', 1)
+ return cls(name=name, rotation_id=rotation_id, ts=ts)
+
+
+class SkaledConfigFilename(IConfigFilename):
+ def __init__(self, name: str) -> None:
+ self.name = name
+
+ @property
+ def filename(self) -> str:
+ return f'schain_{self.name}.json'
+
+ @classmethod
+ def from_filename(cls, filename: str):
+ _, name = filename.split('_')
+ return cls(name)
+
+
+class ConfigFileManager:
+ CFM_LOCK: ClassVar[threading.RLock] = threading.RLock()
+
+ def __init__(self, schain_name: str) -> None:
+ self.schain_name: str = schain_name
+ self.dirname: str = os.path.join(SCHAINS_DIR_PATH, schain_name)
+ self.upstream_prefix = f'schain_{schain_name}_'
+
+ def get_upstream_configs(self) -> List[UpstreamConfigFilename]:
+ pattern = re.compile(rf'{self.upstream_prefix}\d+_\d+.json')
+ with ConfigFileManager.CFM_LOCK:
+ filenames = get_files_with_prefix(
+ self.dirname,
+ self.upstream_prefix
+ )
+ return sorted(
+ map(
+ UpstreamConfigFilename.from_filename,
+ filter(pattern.search, filenames)
+ )
+ )
+
+ @property
+ def latest_upstream_path(self) -> Optional[str]:
+ upstreams = self.get_upstream_configs()
+ if len(upstreams) == 0:
+ return None
+ return upstreams[-1].abspath(self.dirname)
+
+ @property
+ def skaled_config_path(self) -> str:
+ return SkaledConfigFilename(self.schain_name).abspath(self.dirname)
+
+ def upstream_config_exists(self) -> bool:
+ with ConfigFileManager.CFM_LOCK:
+ path = self.latest_upstream_path
+ return path is not None and os.path.isfile(path)
+
+ def skaled_config_exists(self) -> bool:
+ path = SkaledConfigFilename(self.schain_name).abspath(self.dirname)
+ with ConfigFileManager.CFM_LOCK:
+ return os.path.isfile(path)
+
+ @property
+ def latest_upstream_config(self) -> Optional[Dict]:
+ with ConfigFileManager.CFM_LOCK:
+ if not self.upstream_config_exists():
+ return None
+ return read_json(self.latest_upstream_path)
+
+ @property
+ def skaled_config(self):
+ with ConfigFileManager.CFM_LOCK:
+ if not self.skaled_config_exists():
+ return None
+ return read_json(self.skaled_config_path)
+
+ def skaled_config_synced_with_upstream(self) -> bool:
+ with ConfigFileManager.CFM_LOCK:
+ if not self.skaled_config_exists():
+ return False
+ if not self.upstream_config_exists():
+ return True
+ return self.latest_upstream_config == self.skaled_config
+
+ def get_new_upstream_filepath(self, rotation_id: int) -> str:
+ ts = int(time.time())
+ filename = UpstreamConfigFilename(
+ self.schain_name,
+ rotation_id=rotation_id,
+ ts=ts
+ )
+ return filename.abspath(self.dirname)
+
+ def save_new_upstream(self, rotation_id: int, config: Dict) -> None:
+ with ConfigFileManager.CFM_LOCK:
+ config_path = self.get_new_upstream_filepath(rotation_id)
+ write_json(config_path, config)
+
+ def save_skaled_config(self, config: Dict) -> None:
+ with ConfigFileManager.CFM_LOCK:
+ write_json(self.skaled_config_path, config)
+
+ def sync_skaled_config_with_upstream(self) -> bool:
+ with ConfigFileManager.CFM_LOCK:
+ if not self.upstream_config_exists():
+ return False
+ upath = self.latest_upstream_path or ''
+ path = self.skaled_config_path
+ logger.debug('Syncing %s with %s', path, upath)
+ shutil.copy(upath, path)
+ return True
+
+ def upstreams_by_rotation_id(self, rotation_id: int) -> List[str]:
+ return [
+ fp.abspath(self.dirname)
+ for fp in self.get_upstream_configs()
+ if fp.rotation_id == rotation_id
+ ]
+
+ def upstream_exist_for_rotation_id(self, rotation_id: int) -> bool:
+ return len(self.upstreams_by_rotation_id(rotation_id)) > 0
+
+ def remove_skaled_config(self) -> None:
+ with ConfigFileManager.CFM_LOCK:
+ if self.skaled_config_exists():
+ logger.info('Removing skaled config')
+ os.remove(self.skaled_config_path)
diff --git a/core/schains/config/helper.py b/core/schains/config/helper.py
index 3c876edaa..384f8aae1 100644
--- a/core/schains/config/helper.py
+++ b/core/schains/config/helper.py
@@ -17,21 +17,15 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import json
import logging
-import os
-from typing import Dict, List
+from typing import Dict, List, Optional, Tuple
from Crypto.Hash import keccak
from web3 import Web3
-from skale.dataclasses.skaled_ports import SkaledPorts
-
-from core.schains.config.directory import schain_config_filepath
from core.schains.dkg.utils import get_secret_key_share_filepath
from tools.helper import read_json
from tools.configs import STATIC_PARAMS_FILEPATH, ENV_TYPE
-from tools.configs.containers import LOCAL_IP
from tools.helper import safe_load_yml
@@ -44,7 +38,7 @@ def get_static_params(env_type=ENV_TYPE, path=STATIC_PARAMS_FILEPATH):
def fix_address(address):
- return Web3.toChecksumAddress(address)
+ return Web3.to_checksum_address(address)
def get_chain_id(schain_name: str) -> str:
@@ -73,7 +67,7 @@ def get_base_port_from_config(config: Dict) -> int:
return config['skaleConfig']['nodeInfo']['basePort']
-def get_own_ip_from_config(config: Dict) -> str:
+def get_own_ip_from_config(config: Dict) -> Optional[str]:
schain_nodes_config = config['skaleConfig']['sChain']['nodes']
own_id = config['skaleConfig']['nodeInfo']['nodeID']
for node_data in schain_nodes_config:
@@ -82,12 +76,7 @@ def get_own_ip_from_config(config: Dict) -> str:
return None
-def get_schain_ports(schain_name):
- config = get_schain_config(schain_name)
- return get_schain_ports_from_config(config)
-
-
-def get_schain_ports_from_config(config):
+def get_schain_ports_from_config(config: Dict):
if config is None:
return {}
node_info = config["skaleConfig"]["nodeInfo"]
@@ -100,28 +89,6 @@ def get_schain_ports_from_config(config):
}
-def get_skaled_http_address(schain_name: str) -> str:
- config = get_schain_config(schain_name)
- return get_skaled_http_address_from_config(config)
-
-
-def get_skaled_http_address_from_config(config: Dict) -> str:
- node = config['skaleConfig']['nodeInfo']
- return 'http://{}:{}'.format(
- LOCAL_IP,
- node['basePort'] + SkaledPorts.HTTP_JSON.value
- )
-
-
-def get_schain_config(schain_name):
- config_filepath = schain_config_filepath(schain_name)
- if not os.path.isfile(config_filepath):
- return None
- with open(config_filepath) as f:
- schain_config = json.load(f)
- return schain_config
-
-
def get_schain_env(ulimit_check=True):
env = {'SEGFAULT_SIGNALS': 'all'}
if not ulimit_check:
@@ -131,20 +98,18 @@ def get_schain_env(ulimit_check=True):
return env
-def get_schain_rpc_ports(schain_id):
- schain_config = get_schain_config(schain_id)
- node_info = schain_config["skaleConfig"]["nodeInfo"]
+def get_schain_rpc_ports_from_config(config: Dict) -> Tuple[int, int]:
+ node_info = config["skaleConfig"]["nodeInfo"]
return int(node_info["httpRpcPort"]), int(node_info["wsRpcPort"])
-def get_local_schain_http_endpoint(name):
- http_port, _ = get_schain_rpc_ports(name)
- return f'http://0.0.0.0:{http_port}'
+def get_local_schain_http_endpoint_from_config(config: Dict) -> str:
+ http_port, _ = get_schain_rpc_ports_from_config(config)
+ return f'http://127.0.0.1:{http_port}'
-def get_schain_ssl_rpc_ports(schain_id):
- schain_config = get_schain_config(schain_id)
- node_info = schain_config["skaleConfig"]["nodeInfo"]
+def get_schain_ssl_rpc_ports_from_config(config: Dict) -> Tuple[int, int]:
+ node_info = config["skaleConfig"]["nodeInfo"]
return int(node_info["httpsRpcPort"]), int(node_info["wssRpcPort"])
diff --git a/core/schains/config/main.py b/core/schains/config/main.py
index 81aa39288..0dc962d57 100644
--- a/core/schains/config/main.py
+++ b/core/schains/config/main.py
@@ -1,5 +1,5 @@
-# -*- coding: utf-8 -*-
#
+# -*- coding: utf-8 -*-
# This file is part of SKALE Admin
#
# Copyright (C) 2021-Present SKALE Labs
@@ -17,18 +17,17 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import json
-import shutil
import logging
+from typing import Dict, List, Optional
from skale import Skale
from core.node import get_skale_node_version
+from core.schains.config.directory import get_files_with_prefix, schain_config_dir
+from core.schains.config.file_manager import ConfigFileManager, SkaledConfigFilename
from core.schains.config.generator import generate_schain_config_with_skale
-from core.schains.config.directory import get_tmp_schain_config_filepath
-from core.schains.config.directory import schain_config_filepath
-from tools.str_formatters import arguments_list_string
+from tools.configs import SCHAIN_CONFIG_DIR_SKALED
from web.models.schain import upsert_schain_record, SChainRecord
@@ -36,21 +35,18 @@
logger = logging.getLogger(__name__)
-def init_schain_config(
+def create_new_upstream_config(
skale: Skale,
node_id: int,
schain_name: str,
generation: int,
ecdsa_sgx_key_name: str,
rotation_data: dict,
- schain_record: SChainRecord
-):
- config_filepath = schain_config_filepath(schain_name)
-
- logger.warning(arguments_list_string({
- 'sChain name': schain_name,
- 'config_filepath': config_filepath
- }, 'Generating sChain config'))
+ stream_version: str,
+ schain_record: SChainRecord,
+ file_manager: ConfigFileManager
+) -> Dict:
+ logger.info('Generating sChain config for %s', schain_name)
schain_config = generate_schain_config_with_skale(
skale=skale,
@@ -60,16 +56,7 @@ def init_schain_config(
rotation_data=rotation_data,
ecdsa_key_name=ecdsa_sgx_key_name
)
- save_schain_config(schain_config.to_dict(), schain_name)
- update_schain_config_version(schain_name, schain_record=schain_record)
-
-
-def save_schain_config(schain_config, schain_name):
- tmp_config_filepath = get_tmp_schain_config_filepath(schain_name)
- with open(tmp_config_filepath, 'w') as outfile:
- json.dump(schain_config, outfile, indent=4)
- config_filepath = schain_config_filepath(schain_name)
- shutil.move(tmp_config_filepath, config_filepath)
+ return schain_config.to_dict()
def update_schain_config_version(schain_name, schain_record=None):
@@ -83,6 +70,66 @@ def update_schain_config_version(schain_name, schain_record=None):
def schain_config_version_match(schain_name, schain_record=None):
schain_record = schain_record or upsert_schain_record(schain_name)
skale_node_version = get_skale_node_version()
- logger.debug(f'config check, schain: {schain_name}, config_version: \
+ logger.info(f'config check, schain: {schain_name}, config_version: \
{schain_record.config_version}, skale_node_version: {skale_node_version}')
return schain_record.config_version == skale_node_version
+
+
+def get_node_groups_from_config(config: Dict) -> Dict:
+ return config['skaleConfig']['sChain']['nodeGroups']
+
+
+def get_rotation_ids_from_config(config: Optional[Dict]) -> List[int]:
+ if not config:
+ return []
+ node_groups = get_node_groups_from_config(config)
+ rotation_ids = list(sorted(map(int, node_groups.keys())))
+ return rotation_ids
+
+
+def get_upstream_config_rotation_ids(file_manager: ConfigFileManager) -> List[int]:
+ logger.debug('Retrieving upstream rotation_ids')
+ config = file_manager.latest_upstream_config
+ return get_rotation_ids_from_config(config)
+
+
+def get_skaled_config_rotations_ids(file_manager: ConfigFileManager) -> List[int]:
+ logger.debug('Retrieving rotation_ids')
+ config = file_manager.skaled_config
+ return get_rotation_ids_from_config(config)
+
+
+def get_latest_finish_ts(config: Dict) -> Optional[int]:
+ node_groups = get_node_groups_from_config(config)
+ rotation_ids = iter(sorted(map(int, node_groups.keys()), reverse=True))
+ finish_ts = None
+ try:
+ while finish_ts is None:
+ rotation_id = next(rotation_ids)
+ finish_ts = node_groups[str(rotation_id)]['finish_ts']
+ except StopIteration:
+ logger.debug('No finish_ts found in config')
+
+ return finish_ts
+
+
+def get_finish_ts_from_latest_upstream(file_manager: ConfigFileManager) -> Optional[int]:
+ config = file_manager.latest_upstream_config
+ if not config:
+ return None
+ return get_latest_finish_ts(config)
+
+
+def get_finish_ts_from_skaled_config(file_manager: ConfigFileManager) -> Optional[int]:
+ config = file_manager.skaled_config
+ return get_latest_finish_ts(config)
+
+
+def get_number_of_secret_shares(schain_name: str) -> int:
+ config_dir = schain_config_dir(schain_name)
+ prefix = 'secret_key_'
+ return len(get_files_with_prefix(config_dir, prefix))
+
+
+def get_skaled_container_config_path(schain_name: str) -> str:
+ return SkaledConfigFilename(schain_name).abspath(SCHAIN_CONFIG_DIR_SKALED)
diff --git a/core/schains/dkg/broadcast_filter.py b/core/schains/dkg/broadcast_filter.py
index 0bb25e8d6..eb3e69bba 100644
--- a/core/schains/dkg/broadcast_filter.py
+++ b/core/schains/dkg/broadcast_filter.py
@@ -35,8 +35,8 @@ class DKGEvent:
class Filter:
def __init__(self, skale, schain_name, n):
self.skale = skale
- self.group_index = skale.web3.sha3(text=schain_name)
- self.group_index_str = self.skale.web3.toHex(self.group_index)
+ self.group_index = skale.web3.keccak(text=schain_name)
+ self.group_index_str = self.skale.web3.to_hex(self.group_index)
self.first_unseen_block = -1
self.dkg_contract = skale.dkg.contract
self.dkg_contract_address = skale.dkg.address
@@ -44,7 +44,7 @@ def __init__(self, skale, schain_name, n):
self.n = n
self.t = (2 * n + 1) // 3
# TODO: use scheme below to calculate event hash
- # self.skale.web3.toHex(self.skale.web3.sha3(
+ # self.skale.web3.to_hex(self.skale.web3.keccak(
# text="BroadcastAndKeyShare(bytes32,uint256,tuple[],tuple[])")
# )
@@ -75,7 +75,7 @@ def check_event(self, receipt):
return True
def parse_event(self, receipt):
- event_data = receipt['logs'][0]['data'][2:]
+ event_data = receipt['logs'][0]['data'].hex()[2:]
node_index = int(receipt['logs'][0]['topics'][2].hex()[2:], 16)
vv = event_data[192: 192 + self.t * 256]
skc = event_data[192 + 64 + self.t * 256: 192 + 64 + self.t * 256 + 192 * self.n]
@@ -90,12 +90,12 @@ def get_events(self, from_channel_started_block=False):
).call()
else:
start_block = self.first_unseen_block
- current_block = self.skale.web3.eth.getBlock("latest")["number"]
+ current_block = self.skale.web3.eth.get_block("latest")["number"]
logger.info(f'sChain {self.group_index_str}: Parsing broadcast events from {start_block}'
f' block to {current_block} block')
events = []
for block_number in range(start_block, current_block + 1):
- block = self.skale.web3.eth.getBlock(block_number, full_transactions=True)
+ block = self.skale.web3.eth.get_block(block_number, full_transactions=True)
txns = block["transactions"]
for tx in txns:
try:
@@ -104,7 +104,7 @@ def get_events(self, from_channel_started_block=False):
hash = tx.get("hash")
if hash:
- receipt = self.skale.web3.eth.getTransactionReceipt(hash)
+ receipt = self.skale.web3.eth.get_transaction_receipt(hash)
else:
logger.info(f'sChain {self.group_index_str}: tx {tx}'
f' does not have field "hash"')
diff --git a/core/schains/dkg/client.py b/core/schains/dkg/client.py
index 1ee5044a3..00ff77179 100644
--- a/core/schains/dkg/client.py
+++ b/core/schains/dkg/client.py
@@ -143,7 +143,7 @@ def __init__(self, node_id_dkg, node_id_contract, skale, t, n, schain_name, publ
self.t = t
self.n = n
self.eth_key_name = eth_key_name
- group_index_str = str(int(skale.web3.toHex(self.group_index)[2:], 16))
+ group_index_str = str(int(skale.web3.to_hex(self.group_index)[2:], 16))
self.poly_name = generate_poly_name(group_index_str, self.node_id_dkg, rotation_id)
self.bls_name = generate_bls_key_name(group_index_str, self.node_id_dkg, rotation_id)
self.incoming_verification_vector = ['0' for _ in range(n)]
@@ -153,7 +153,7 @@ def __init__(self, node_id_dkg, node_id_contract, skale, t, n, schain_name, publ
self.node_ids_contract = node_ids_contract
self.dkg_contract_functions = self.skale.dkg.contract.functions
self.dkg_timeout = self.skale.constants_holder.get_dkg_timeout()
- self.complaint_error_event_hash = self.skale.web3.toHex(self.skale.web3.sha3(
+ self.complaint_error_event_hash = self.skale.web3.to_hex(self.skale.web3.keccak(
text="ComplaintError(string)"
))
logger.info(
diff --git a/core/schains/dkg/utils.py b/core/schains/dkg/utils.py
index b5c88ab15..87ee20573 100644
--- a/core/schains/dkg/utils.py
+++ b/core/schains/dkg/utils.py
@@ -275,7 +275,7 @@ def wait_for_fail(skale, schain_name, channel_started_time, reason=""):
def get_latest_block_timestamp(skale):
- return skale.web3.eth.getBlock("latest")["timestamp"]
+ return skale.web3.eth.get_block("latest")["timestamp"]
def get_secret_key_share_filepath(schain_name, rotation_id):
diff --git a/core/schains/external_config.py b/core/schains/external_config.py
new file mode 100644
index 000000000..4975c8426
--- /dev/null
+++ b/core/schains/external_config.py
@@ -0,0 +1,72 @@
+import os
+import threading
+from dataclasses import dataclass, field
+from typing import Dict, List, Optional
+
+from core.schains.firewall.types import IpRange
+from core.schains.config.directory import schain_config_dir
+from tools.helper import read_json, write_json
+
+
+@dataclass
+class ExternalState:
+ chain_id: int
+    ranges: List[IpRange] = field(default_factory=list)
+ ima_linked: bool = False
+
+ def to_dict(self):
+ return {
+ 'chain_id': self.chain_id,
+ 'ima_linked': self.ima_linked,
+ 'ranges': list(map(list, self.ranges))
+ }
+
+
+class ExternalConfig:
+ FILENAME = 'external.json'
+
+ _lock = threading.Lock()
+
+ def __init__(self, name: str) -> None:
+ self.path = os.path.join(schain_config_dir(name), ExternalConfig.FILENAME)
+
+ @property
+ def ima_linked(self) -> bool:
+ return self.read().get('ima_linked', True)
+
+ @property
+ def chain_id(self) -> Optional[int]:
+ return self.read().get('chain_id', None)
+
+ @property
+ def ranges(self) -> List[IpRange]:
+ plain_ranges = self.read().get('ranges', [])
+ return list(sorted(map(lambda r: IpRange(*r), plain_ranges)))
+
+ def get(self) -> Optional[ExternalState]:
+ plain = self.read()
+ if plain:
+ return ExternalState(
+ chain_id=plain['chain_id'],
+ ima_linked=plain['ima_linked'],
+ ranges=list(sorted(map(lambda r: IpRange(*r), plain['ranges'])))
+
+ )
+ return None
+
+ def read(self) -> Dict:
+ data = {}
+ with ExternalConfig._lock:
+ if os.path.isfile(self.path):
+ data = read_json(self.path)
+ return data
+
+ def write(self, content: Dict) -> None:
+ with ExternalConfig._lock:
+ write_json(self.path, content)
+
+ def update(self, ex_state: ExternalState) -> None:
+ self.write(ex_state.to_dict())
+
+ def synced(self, ex_state: ExternalState) -> bool:
+ return self.get() == ex_state
diff --git a/core/schains/firewall/__init__.py b/core/schains/firewall/__init__.py
index 85a7c06b0..8edbd1a7c 100644
--- a/core/schains/firewall/__init__.py
+++ b/core/schains/firewall/__init__.py
@@ -20,4 +20,5 @@
from .firewall_manager import SChainFirewallManager # noqa
from .iptables import IptablesController # noqa
from .rule_controller import SChainRuleController # noqa
+from .types import IRuleController # noqa
from .utils import get_default_rule_controller # noqa
diff --git a/core/schains/firewall/rule_controller.py b/core/schains/firewall/rule_controller.py
index e71e456bb..2fda28168 100644
--- a/core/schains/firewall/rule_controller.py
+++ b/core/schains/firewall/rule_controller.py
@@ -190,9 +190,9 @@ def is_rules_synced(self) -> bool:
expected = set(self.expected_rules())
logger.debug('Rules status: actual %s, expected %s', actual, expected)
logger.info(
- 'Rules status: missing rules %s, redundant rules: %s',
- expected - actual,
- actual - expected
+ 'Rules status: missing rules %d, redundant rules: %d',
+ len(expected - actual),
+ len(actual - expected)
)
return actual == expected
diff --git a/core/schains/firewall/utils.py b/core/schains/firewall/utils.py
index cfc524062..737361e18 100644
--- a/core/schains/firewall/utils.py
+++ b/core/schains/firewall/utils.py
@@ -17,8 +17,10 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+import json
import logging
-from typing import List, Optional
+
+from typing import List, Optional, Tuple
from skale import Skale
@@ -53,4 +55,14 @@ def get_sync_agent_ranges(skale: Skale) -> List[IpRange]:
rnum = skale.sync_manager.get_ip_ranges_number()
for i in range(rnum):
sync_agent_ranges.append(skale.sync_manager.get_ip_range_by_index(i))
- return sync_agent_ranges
+ return sorted(sync_agent_ranges)
+
+
+def save_sync_ranges(sync_agent_ranges: List[IpRange], path: str) -> None:
+ output = {'ranges': [list(r) for r in sync_agent_ranges]}
+ with open(path, 'w') as out_file:
+ json.dump(output, out_file)
+
+
+def ranges_from_plain_tuples(plain_ranges: List[Tuple]) -> List[IpRange]:
+ return list(sorted(map(lambda r: IpRange(*r), plain_ranges)))
diff --git a/core/schains/ima.py b/core/schains/ima.py
index 7fe8a5bf2..6547510d7 100644
--- a/core/schains/ima.py
+++ b/core/schains/ima.py
@@ -27,7 +27,8 @@
from websocket import create_connection
from core.schains.config.directory import schain_config_dir
-from core.schains.config.helper import get_schain_ports, get_schain_config, get_chain_id
+from core.schains.config.file_manager import ConfigFileManager
+from core.schains.config.helper import get_schain_ports_from_config, get_chain_id
from core.ima.schain import get_schain_ima_abi_filepath
from tools.configs import ENV_TYPE, SGX_SSL_KEY_FILEPATH, SGX_SSL_CERT_FILEPATH, SGX_SERVER_URL
from tools.configs.containers import CONTAINERS_INFO, IMA_MIGRATION_PATH
@@ -48,7 +49,7 @@
@dataclass
class ImaData:
linked: bool
- chain_id: str
+ chain_id: int
@dataclass
@@ -120,17 +121,20 @@ def get_current_node_from_nodes(node_id, schain_nodes):
def get_localhost_http_endpoint(schain_name):
- ports = get_schain_ports(schain_name)
+ config = ConfigFileManager(schain_name).skaled_config
+ ports = get_schain_ports_from_config(config)
return f'http://127.0.0.1:{ports["http"]}'
def get_public_http_endpoint(public_node_info, schain_name):
- ports = get_schain_ports(schain_name)
+ config = ConfigFileManager(schain_name).skaled_config
+ ports = get_schain_ports_from_config(config)
return f'http://{public_node_info["ip"]}:{ports["http"]}'
def get_local_http_endpoint(node_info, schain_name):
- ports = get_schain_ports(schain_name)
+ config = ConfigFileManager(schain_name).skaled_config
+ ports = get_schain_ports_from_config(config)
return f'http://{node_info["bindIP"]}:{ports["http"]}'
@@ -139,11 +143,12 @@ def schain_index_to_node_number(node):
def get_ima_env(schain_name: str, mainnet_chain_id: int) -> ImaEnv:
- schain_config = get_schain_config(schain_name)
+ schain_config = ConfigFileManager(schain_name).skaled_config
node_info = schain_config["skaleConfig"]["nodeInfo"]
bls_key_name = node_info['wallets']['ima']['keyShareName']
schain_nodes = schain_config["skaleConfig"]["sChain"]
- public_node_info = get_current_node_from_nodes(node_info['nodeID'], schain_nodes)
+ public_node_info = get_current_node_from_nodes(
+ node_info['nodeID'], schain_nodes)
schain_index = schain_index_to_node_number(public_node_info)
node_address = public_node_info['owner']
@@ -181,7 +186,7 @@ def get_ima_version() -> str:
def get_ima_monitoring_port(schain_name):
- schain_config = get_schain_config(schain_name)
+ schain_config = ConfigFileManager(schain_name).skaled_config
if schain_config:
node_info = schain_config["skaleConfig"]["nodeInfo"]
return int(node_info["imaMonitoringPort"])
@@ -190,13 +195,14 @@ def get_ima_monitoring_port(schain_name):
def get_ima_rpc_port(schain_name):
- config = get_schain_config(schain_name)
+ config = ConfigFileManager(schain_name).skaled_config
base_port = config['skaleConfig']['nodeInfo']['basePort']
return base_port + SkaledPorts.IMA_RPC.value
def get_ima_container_statuses():
- containers_list = g.docker_utils.get_all_ima_containers(all=True, format=True)
+ containers_list = g.docker_utils.get_all_ima_containers(
+ all=True, format=True)
ima_containers = [{'name': container['name'], 'state': container['state']['Status']}
for container in containers_list]
return ima_containers
@@ -229,7 +235,8 @@ def get_ima_log_checks():
errors = []
categories = []
container_name = f'skale_ima_{schain_name}'
- cont_data = next((item for item in ima_containers if item["name"] == container_name), None)
+ cont_data = next(
+ (item for item in ima_containers if item["name"] == container_name), None)
if cont_data is None:
continue
elif cont_data['state'] != 'running':
@@ -247,7 +254,8 @@ def get_ima_log_checks():
try:
ima_healthcheck = request_ima_healthcheck(endpoint)
except Exception as err:
- logger.info(f'Error occurred while checking IMA state on {endpoint}')
+ logger.info(
+ f'Error occurred while checking IMA state on {endpoint}')
logger.exception(err)
error_text = repr(err)
else:
diff --git a/core/schains/monitor/__init__.py b/core/schains/monitor/__init__.py
index 4fc8e3145..b8331a27e 100644
--- a/core/schains/monitor/__init__.py
+++ b/core/schains/monitor/__init__.py
@@ -17,10 +17,5 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-from .base_monitor import BaseMonitor # noqa
-from .regular_monitor import RegularMonitor # noqa
-from .repair_monitor import RepairMonitor # noqa
-from .backup_monitor import BackupMonitor # noqa
-from .rotation_monitor import RotationMonitor # noqa
-from .post_rotation_monitor import PostRotationMonitor # noqa
-from .reload_monitor import ReloadMonitor # noqa
+from .config_monitor import RegularConfigMonitor # noqa
+from .skaled_monitor import get_skaled_monitor # noqa
diff --git a/core/schains/monitor/action.py b/core/schains/monitor/action.py
new file mode 100644
index 000000000..34ce0e625
--- /dev/null
+++ b/core/schains/monitor/action.py
@@ -0,0 +1,484 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2021-Present SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see .
+
+import logging
+import time
+from datetime import datetime
+from functools import wraps
+from typing import Dict, Optional
+
+from skale import Skale
+
+from core.node_config import NodeConfig
+from core.schains.checks import ConfigChecks, SkaledChecks
+from core.schains.dkg import safe_run_dkg, save_dkg_results, DkgError
+from core.schains.dkg.utils import get_secret_key_share_filepath
+from core.schains.ima import get_migration_ts as get_ima_migration_ts
+
+from core.schains.cleaner import (
+ remove_ima_container,
+ remove_schain_container,
+ remove_schain_volume
+)
+from core.schains.firewall.types import IRuleController
+
+from core.schains.volume import init_data_volume
+from core.schains.rotation import set_rotation_for_schain
+
+from core.schains.limits import get_schain_type
+
+from core.schains.monitor.containers import monitor_schain_container, monitor_ima_container
+from core.schains.monitor.rpc import handle_failed_schain_rpc
+from core.schains.runner import (
+ get_container_name,
+ is_container_exists,
+ pull_new_image,
+ restart_container
+)
+from core.schains.config.main import (
+ create_new_upstream_config,
+ get_finish_ts_from_skaled_config,
+ get_finish_ts_from_latest_upstream
+)
+from core.schains.config import init_schain_config_dir
+from core.schains.config.main import update_schain_config_version
+from core.schains.config.file_manager import ConfigFileManager
+from core.schains.config.helper import (
+ get_base_port_from_config,
+ get_node_ips_from_config,
+ get_local_schain_http_endpoint_from_config,
+ get_own_ip_from_config
+)
+from core.schains.ima import ImaData
+from core.schains.external_config import ExternalConfig, ExternalState
+from core.schains.skaled_status import init_skaled_status
+
+from tools.docker_utils import DockerUtils
+from tools.str_formatters import arguments_list_string
+from tools.configs.containers import IMA_CONTAINER, SCHAIN_CONTAINER
+
+from tools.notifications.messages import notify_repair_mode
+from web.models.schain import SChainRecord, upsert_schain_record
+
+
+logger = logging.getLogger(__name__)
+
+
+CONTAINER_POST_RUN_DELAY = 20
+SCHAIN_CLEANUP_TIMEOUT = 10
+
+
+class BaseActionManager:
+ def __init__(self, name: str):
+ self.name = name
+ self.executed_blocks: Dict = {}
+
+ @staticmethod
+ def monitor_block(f):
+ @wraps(f)
+ def _monitor_block(self, *args, **kwargs):
+ ts = time.time()
+ initial_status = f(self, *args, **kwargs)
+ te = time.time()
+ self.executed_blocks[f.__name__] = {
+ 'ts': ts,
+ 'te': te,
+ 'initial_status': initial_status
+ }
+ return initial_status
+ return _monitor_block
+
+ @property
+ def schain_record(self) -> SChainRecord:
+ return upsert_schain_record(self.name)
+
+ def _upd_last_seen(self) -> None:
+ self.schain_record.set_monitor_last_seen(datetime.now())
+
+ def _upd_schain_record(self) -> None:
+ if self.schain_record.first_run:
+ self.schain_record.set_restart_count(0)
+ self.schain_record.set_failed_rpc_count(0)
+ self.schain_record.set_first_run(False)
+ self.schain_record.set_new_schain(False)
+ logger.info(
+ 'restart_count - %s, failed_rpc_count - %s',
+ self.schain_record.restart_count,
+ self.schain_record.failed_rpc_count
+ )
+
+ def log_executed_blocks(self) -> None:
+ logger.info(arguments_list_string(
+ self.executed_blocks, f'Finished monitor runner - {self.name}'))
+
+
+class ConfigActionManager(BaseActionManager):
+ def __init__(
+ self,
+ skale: Skale,
+ schain: dict,
+ node_config: NodeConfig,
+ rotation_data: dict,
+ stream_version: str,
+ checks: ConfigChecks,
+ estate: ExternalState,
+ econfig: Optional[ExternalConfig] = None
+ ):
+ self.skale = skale
+ self.schain = schain
+ self.generation = schain['generation']
+ self.node_config = node_config
+ self.checks = checks
+ self.stream_version = stream_version
+
+ self.rotation_data = rotation_data
+ self.rotation_id = rotation_data['rotation_id']
+ self.estate = estate
+ self.econfig = econfig or ExternalConfig(name=schain['name'])
+ self.cfm: ConfigFileManager = ConfigFileManager(
+ schain_name=self.schain['name']
+ )
+ super().__init__(name=schain['name'])
+
+ @BaseActionManager.monitor_block
+ def config_dir(self) -> bool:
+ logger.info('Initializing config dir')
+ init_schain_config_dir(self.name)
+ return True
+
+ @BaseActionManager.monitor_block
+ def dkg(self) -> bool:
+ initial_status = self.checks.dkg.status
+ if not initial_status:
+ logger.info('Running safe_run_dkg')
+ dkg_result = safe_run_dkg(
+ skale=self.skale,
+ schain_name=self.name,
+ node_id=self.node_config.id,
+ sgx_key_name=self.node_config.sgx_key_name,
+ rotation_id=self.rotation_id
+ )
+ if dkg_result.status.is_done():
+ save_dkg_results(
+ dkg_result.keys_data,
+ get_secret_key_share_filepath(self.name, self.rotation_id)
+ )
+ self.schain_record.set_dkg_status(dkg_result.status)
+ if not dkg_result.status.is_done():
+ raise DkgError('DKG failed')
+ else:
+ logger.info('Dkg - ok')
+ return initial_status
+
+ @BaseActionManager.monitor_block
+ def upstream_config(self) -> bool:
+ logger.info(
+ 'Creating new upstream_config rotation_id: %s, stream: %s',
+ self.rotation_data.get('rotation_id'), self.stream_version
+ )
+ new_config = create_new_upstream_config(
+ skale=self.skale,
+ node_id=self.node_config.id,
+ schain_name=self.name,
+ generation=self.generation,
+ ecdsa_sgx_key_name=self.node_config.sgx_key_name,
+ rotation_data=self.rotation_data,
+ stream_version=self.stream_version,
+ schain_record=self.schain_record,
+ file_manager=self.cfm
+ )
+
+ result = False
+ if not self.cfm.upstream_config_exists() or new_config != self.cfm.latest_upstream_config:
+ rotation_id = self.rotation_data['rotation_id']
+ logger.info(
+ 'Saving new upstream config rotation_id: %d', rotation_id)
+ self.cfm.save_new_upstream(rotation_id, new_config)
+ result = True
+ else:
+ logger.info('Generated config is the same as latest upstream')
+
+ update_schain_config_version(
+ self.name, schain_record=self.schain_record)
+ return result
+
+ @BaseActionManager.monitor_block
+ def reset_config_record(self) -> bool:
+ update_schain_config_version(
+ self.name, schain_record=self.schain_record)
+ self.schain_record.set_sync_config_run(False)
+ return True
+
+ @BaseActionManager.monitor_block
+ def external_state(self) -> bool:
+ logger.info('Updating external state config')
+ logger.debug('New state %s', self.estate)
+ self.econfig.update(self.estate)
+ return True
+
+
+class SkaledActionManager(BaseActionManager):
+ def __init__(
+ self,
+ schain: dict,
+ rule_controller: IRuleController,
+ checks: SkaledChecks,
+ node_config: NodeConfig,
+ econfig: Optional[ExternalConfig] = None,
+ dutils: DockerUtils = None
+ ):
+ self.schain = schain
+ self.generation = schain['generation']
+ self.checks = checks
+ self.node_config = node_config
+
+ self.rc = rule_controller
+ self.skaled_status = init_skaled_status(self.schain['name'])
+ self.schain_type = get_schain_type(schain['partOfNode'])
+ self.econfig = econfig or ExternalConfig(schain['name'])
+ self.cfm: ConfigFileManager = ConfigFileManager(
+ schain_name=self.schain['name']
+ )
+
+ self.dutils = dutils or DockerUtils()
+
+ super().__init__(name=schain['name'])
+
+ @BaseActionManager.monitor_block
+ def volume(self) -> bool:
+ initial_status = self.checks.volume.status
+ if not initial_status:
+ logger.info('Creating volume')
+ init_data_volume(self.schain, dutils=self.dutils)
+ else:
+ logger.info('Volume - ok')
+ return initial_status
+
+ @BaseActionManager.monitor_block
+ def firewall_rules(self) -> bool:
+ initial_status = self.checks.firewall_rules.status
+ if not initial_status:
+ logger.info('Configuring firewall rules')
+
+ conf = self.cfm.skaled_config
+ base_port = get_base_port_from_config(conf)
+ node_ips = get_node_ips_from_config(conf)
+ own_ip = get_own_ip_from_config(conf)
+
+ logger.debug('Base port %d', base_port)
+
+ ranges = self.econfig.ranges
+ logger.info('Adding ranges %s', ranges)
+ self.rc.configure(
+ base_port=base_port,
+ own_ip=own_ip,
+ node_ips=node_ips,
+ sync_ip_ranges=ranges
+ )
+ self.rc.sync()
+ return initial_status
+
+ @BaseActionManager.monitor_block
+ def skaled_container(
+ self,
+ download_snapshot: bool = False,
+ start_ts: Optional[int] = None,
+ abort_on_exit: bool = True,
+ ) -> bool:
+ logger.info(
+ 'Starting skaled container watchman snapshot: %s, start_ts: %s',
+ download_snapshot,
+ start_ts
+ )
+ monitor_schain_container(
+ self.schain,
+ schain_record=self.schain_record,
+ skaled_status=self.skaled_status,
+ download_snapshot=download_snapshot,
+ start_ts=start_ts,
+ abort_on_exit=abort_on_exit,
+ dutils=self.dutils
+ )
+ time.sleep(CONTAINER_POST_RUN_DELAY)
+ return True
+
+ @BaseActionManager.monitor_block
+ def restart_skaled_container(self) -> bool:
+ initial_status = True
+ if is_container_exists(self.name, dutils=self.dutils):
+ logger.info('Skaled container exists, restarting')
+ restart_container(SCHAIN_CONTAINER, self.schain,
+ dutils=self.dutils)
+ else:
+ logger.info(
+ 'Skaled container doesn\'t exists, running skaled watchman')
+ initial_status = self.skaled_container()
+ return initial_status
+
+ @BaseActionManager.monitor_block
+ def restart_ima_container(self) -> bool:
+ initial_status = True
+ if is_container_exists(self.name, container_type=IMA_CONTAINER, dutils=self.dutils):
+ logger.info('IMA container exists, restarting')
+ restart_container(IMA_CONTAINER, self.schain, dutils=self.dutils)
+ else:
+ logger.info(
+ 'IMA container doesn\'t exists, running skaled watchman')
+ initial_status = self.ima_container()
+ return initial_status
+
+ @BaseActionManager.monitor_block
+ def reset_restart_counter(self) -> bool:
+ self.schain_record.set_restart_count(0)
+ return True
+
+ @BaseActionManager.monitor_block
+ def reloaded_skaled_container(self, abort_on_exit: bool = True) -> bool:
+ logger.info('Starting skaled from scratch')
+ initial_status = True
+ if is_container_exists(self.name, dutils=self.dutils):
+ logger.info('Removing skaled container')
+ remove_schain_container(self.name, dutils=self.dutils)
+ else:
+ logger.warning('Container doesn\'t exists')
+ self.schain_record.set_restart_count(0)
+ self.schain_record.set_failed_rpc_count(0)
+ self.schain_record.set_needs_reload(False)
+ initial_status = self.skaled_container(
+ abort_on_exit=abort_on_exit)
+ return initial_status
+
+ @BaseActionManager.monitor_block
+ def recreated_schain_containers(self, abort_on_exit: bool = True) -> bool:
+ logger.info('Restart skaled and IMA from scratch')
+ initial_status = True
+ # Remove IMA -> skaled, start skaled -> IMA
+ if is_container_exists(self.name, container_type=IMA_CONTAINER, dutils=self.dutils):
+ initial_status = False
+ remove_ima_container(self.name, dutils=self.dutils)
+ if is_container_exists(self.name, container_type=SCHAIN_CONTAINER, dutils=self.dutils):
+ initial_status = False
+ remove_schain_container(self.name, dutils=self.dutils)
+ # Reseting restart counters
+ self.schain_record.set_restart_count(0)
+ self.schain_record.set_failed_rpc_count(0)
+ self.schain_record.set_needs_reload(False)
+ self.skaled_container(abort_on_exit=abort_on_exit)
+ self.ima_container()
+ return initial_status
+
+ @BaseActionManager.monitor_block
+ def skaled_rpc(self) -> bool:
+ initial_status = self.checks.rpc.status
+ if not initial_status:
+ self.display_skaled_logs()
+ logger.info('Handling schain rpc')
+ handle_failed_schain_rpc(
+ self.schain,
+ schain_record=self.schain_record,
+ skaled_status=self.skaled_status,
+ dutils=self.dutils
+ )
+ else:
+ self.schain_record.set_failed_rpc_count(0)
+ logger.info('rpc - ok')
+ return initial_status
+
+ def ima_container(self) -> bool:
+ initial_status = self.checks.ima_container.status
+ migration_ts = get_ima_migration_ts(self.name)
+ logger.debug('Migration time for %s IMA - %d', self.name, migration_ts)
+ if not initial_status:
+ pull_new_image(type=IMA_CONTAINER, dutils=self.dutils)
+ ima_data = ImaData(
+ linked=self.econfig.ima_linked,
+ chain_id=self.econfig.chain_id
+ )
+ logger.info('Running IMA container watchman')
+ monitor_ima_container(
+ self.schain,
+ ima_data,
+ migration_ts=migration_ts,
+ dutils=self.dutils
+ )
+ else:
+ logger.info('ima_container - ok')
+ return initial_status
+
+ @BaseActionManager.monitor_block
+ def cleanup_schain_docker_entity(self) -> bool:
+ logger.info('Removing skaled docker artifacts')
+ remove_schain_container(self.name, dutils=self.dutils)
+ time.sleep(SCHAIN_CLEANUP_TIMEOUT)
+ remove_schain_volume(self.name, dutils=self.dutils)
+ return True
+
+ @BaseActionManager.monitor_block
+ def update_config(self) -> bool:
+ logger.info('Syncing skaled config with upstream')
+ return self.cfm.sync_skaled_config_with_upstream()
+
+ @BaseActionManager.monitor_block
+ def send_exit_request(self) -> None:
+ if self.skaled_status.exit_time_reached:
+ logger.info('Exit time has been already set')
+ return
+ finish_ts = self.upstream_finish_ts
+ logger.info('Trying to set skaled exit time %s', finish_ts)
+ if finish_ts is not None:
+ url = get_local_schain_http_endpoint_from_config(
+ self.cfm.skaled_config)
+ set_rotation_for_schain(url, finish_ts)
+
+ @BaseActionManager.monitor_block
+ def disable_backup_run(self) -> None:
+ logger.debug('Turning off backup mode')
+ self.schain_record.set_backup_run(False)
+
+ @property
+ def upstream_config_path(self) -> Optional[str]:
+ return self.cfm.latest_upstream_path
+
+ @property
+ def upstream_finish_ts(self) -> Optional[int]:
+ return get_finish_ts_from_latest_upstream(self.cfm)
+
+ @property
+ def finish_ts(self) -> Optional[int]:
+ return get_finish_ts_from_skaled_config(self.cfm)
+
+ def display_skaled_logs(self) -> None:
+ if is_container_exists(self.name, dutils=self.dutils):
+ container_name = get_container_name(SCHAIN_CONTAINER, self.name)
+ self.dutils.display_container_logs(container_name)
+ else:
+ logger.warning(
+ f'sChain {self.name}: container doesn\'t exists, could not show logs')
+
+ @BaseActionManager.monitor_block
+ def notify_repair_mode(self) -> None:
+ notify_repair_mode(
+ self.node_config.all(),
+ self.name
+ )
+
+ @BaseActionManager.monitor_block
+ def disable_repair_mode(self) -> None:
+ logger.info('Switching off repair mode')
+ self.schain_record.set_repair_mode(False)
diff --git a/core/schains/monitor/backup_monitor.py b/core/schains/monitor/backup_monitor.py
deleted file mode 100644
index ccd3b3a45..000000000
--- a/core/schains/monitor/backup_monitor.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of SKALE Admin
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import logging
-from core.schains.monitor.base_monitor import BaseMonitor
-
-
-logger = logging.getLogger(__name__)
-
-
-class BackupMonitor(BaseMonitor):
- @BaseMonitor.monitor_runner
- def run(self):
- self.config_dir()
- self.dkg()
- self.config()
- self.volume()
- self.firewall_rules()
- self.skaled_container(download_snapshot=True)
- self.skaled_rpc()
- self.ima_container()
diff --git a/core/schains/monitor/base_monitor.py b/core/schains/monitor/base_monitor.py
index c6402a999..f7ad91d59 100644
--- a/core/schains/monitor/base_monitor.py
+++ b/core/schains/monitor/base_monitor.py
@@ -17,55 +17,8 @@
# You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
-import time
import logging
from abc import ABC, abstractmethod
-from datetime import datetime
-from functools import wraps
-
-from skale import Skale
-
-from core.node_config import NodeConfig
-from core.schains.checks import SChainChecks
-from core.schains.dkg import safe_run_dkg, save_dkg_results, DkgError
-from core.schains.dkg.utils import get_secret_key_share_filepath
-from core.schains.cleaner import (
- remove_ima_container,
- remove_schain_container,
- remove_schain_volume
-)
-from core.schains.firewall.types import IRuleController
-
-from core.schains.volume import init_data_volume
-from core.schains.rotation import get_schain_public_key
-
-from core.schains.limits import get_schain_type
-
-from core.schains.monitor.containers import monitor_schain_container, monitor_ima_container
-from core.schains.monitor.rpc import handle_failed_schain_rpc
-from core.schains.runner import (
- pull_new_image,
- restart_container,
- is_container_exists,
- get_container_name
-)
-from core.schains.config import init_schain_config, init_schain_config_dir
-from core.schains.config.directory import get_schain_config
-from core.schains.config.helper import (
- get_base_port_from_config,
- get_node_ips_from_config,
- get_own_ip_from_config
-)
-from core.schains.ima import get_migration_ts as get_ima_migration_ts, ImaData
-from core.schains.skaled_status import init_skaled_status
-
-from tools.docker_utils import DockerUtils
-from tools.notifications.messages import notify_checks, is_checks_passed
-from tools.str_formatters import arguments_list_string
-from tools.configs.containers import IMA_CONTAINER, SCHAIN_CONTAINER
-
-from web.models.schain import upsert_schain_record, set_first_run, SChainRecord
-
logger = logging.getLogger(__name__)
@@ -74,293 +27,7 @@
SCHAIN_CLEANUP_TIMEOUT = 10
-class BaseMonitor(ABC):
- def __init__(
- self,
- skale: Skale,
- ima_data: ImaData,
- schain: dict,
- node_config: NodeConfig,
- rotation_data: dict,
- checks: SChainChecks,
- rule_controller: IRuleController,
- dutils: DockerUtils = None
- ):
- self.skale = skale
- self.ima_data = ima_data
- self.schain = schain
- self.name = schain['name']
- self.generation = schain['generation']
- self.node_config = node_config
- self.checks = checks
- self.executed_blocks = {}
-
- self.rotation_data = rotation_data
- self.rotation_id = rotation_data['rotation_id']
- self.rc = rule_controller
-
- self.finish_ts = skale.node_rotation.get_schain_finish_ts(
- node_id=rotation_data['leaving_node'],
- schain_name=self.name
- )
- logger.info(f'sChain finish_ts calculated: {self.finish_ts}')
-
- self.skaled_status = init_skaled_status(self.name)
-
- self.schain_type = get_schain_type(schain['partOfNode'])
-
- self.dutils = dutils or DockerUtils()
- self.p = f'{type(self).__name__} - schain: {self.name} -'
-
- @property
- def schain_record(self):
- return upsert_schain_record(self.name)
-
- def _upd_last_seen(self) -> None:
- self.schain_record.set_monitor_last_seen(datetime.now())
-
- def _upd_schain_record(self) -> None:
- if self.schain_record.first_run:
- self.schain_record.set_restart_count(0)
- self.schain_record.set_failed_rpc_count(0)
- set_first_run(self.name, False)
- self.schain_record.set_new_schain(False)
- logger.info(
- f'sChain {self.name}: '
- f'restart_count - {self.schain_record.restart_count}, '
- f'failed_rpc_count - {self.schain_record.failed_rpc_count}'
- )
-
- def _run_all_checks(self, save_checks=True) -> None:
- checks_dict = self.checks.get_all(save=save_checks)
- if not is_checks_passed(checks_dict):
- notify_checks(self.name, self.node_config.all(), checks_dict)
-
- def monitor_block(f):
- @wraps(f)
- def _monitor_block(self, *args, **kwargs):
- ts = time.time()
- initial_status = f(self, *args, **kwargs)
- te = time.time()
- self.executed_blocks[f.__name__] = {
- 'ts': ts,
- 'te': te,
- 'initial_status': initial_status
- }
- return initial_status
- return _monitor_block
-
- def monitor_runner(f):
- @wraps(f)
- def _monitor_runner(self):
- logger.info(arguments_list_string({
- 'Monitor type': type(self).__name__,
- 'Rotation data': self.rotation_data,
- 'sChain record': SChainRecord.to_dict(self.schain_record)
- }, f'Starting monitor runner - {self.name}'))
-
- self._upd_last_seen()
- if not self.schain_record.first_run:
- self._run_all_checks()
- self._upd_schain_record()
- res = f(self)
- self._upd_last_seen()
- self.log_executed_blocks()
- logger.info(f'{self.p} finished monitor runner')
- return res
- return _monitor_runner
-
+class IMonitor(ABC):
@abstractmethod
def run(self):
pass
-
- @monitor_block
- def config_dir(self) -> bool:
- initial_status = self.checks.config_dir.status
- if not initial_status:
- init_schain_config_dir(self.name)
- else:
- logger.info(f'{self.p} config_dir - ok')
- return initial_status
-
- @monitor_block
- def dkg(self) -> bool:
- initial_status = self.checks.dkg.status
- if not initial_status:
- dkg_result = safe_run_dkg(
- skale=self.skale,
- schain_name=self.name,
- node_id=self.node_config.id,
- sgx_key_name=self.node_config.sgx_key_name,
- rotation_id=self.rotation_id
- )
- if dkg_result.status.is_done():
- save_dkg_results(
- dkg_result.keys_data,
- get_secret_key_share_filepath(self.name, self.rotation_id)
- )
- self.schain_record.set_dkg_status(dkg_result.status)
- if not dkg_result.status.is_done():
- raise DkgError(f'{self.p} DKG failed')
- else:
- logger.info(f'{self.p} dkg - ok')
- return initial_status
-
- @monitor_block
- def config(self, overwrite=False) -> bool:
- initial_status = self.checks.config.status
- if not initial_status or overwrite:
- init_schain_config(
- skale=self.skale,
- node_id=self.node_config.id,
- schain_name=self.name,
- generation=self.generation,
- ecdsa_sgx_key_name=self.node_config.sgx_key_name,
- rotation_data=self.rotation_data,
- schain_record=self.schain_record
- )
- else:
- logger.info(f'{self.p} config - ok')
- return initial_status
-
- @monitor_block
- def volume(self) -> bool:
- initial_status = self.checks.volume.status
- if not initial_status:
- init_data_volume(self.schain, dutils=self.dutils)
- else:
- logger.info(f'{self.p} volume - ok')
- return initial_status
-
- @monitor_block
- def firewall_rules(self, overwrite=False) -> bool:
- initial_status = self.checks.firewall_rules.status
- if not initial_status:
- logger.info('Configuring firewall rules')
- conf = get_schain_config(self.name)
- base_port = get_base_port_from_config(conf)
- node_ips = get_node_ips_from_config(conf)
- own_ip = get_own_ip_from_config(conf)
- self.rc.configure(
- base_port=base_port,
- own_ip=own_ip,
- node_ips=node_ips
- )
- self.rc.sync()
- return initial_status
-
- @monitor_block
- def skaled_container(self, download_snapshot: bool = False, delay_start: bool = False) -> bool:
- initial_status = self.checks.skaled_container.status
- if not initial_status:
- public_key, start_ts = None, None
-
- if download_snapshot:
- public_key = get_schain_public_key(self.skale, self.name)
- if delay_start:
- start_ts = self.finish_ts
-
- monitor_schain_container(
- self.schain,
- schain_record=self.schain_record,
- skaled_status=self.skaled_status,
- public_key=public_key,
- start_ts=start_ts,
- dutils=self.dutils
- )
- time.sleep(CONTAINER_POST_RUN_DELAY)
- else:
- self.schain_record.set_restart_count(0)
- logger.info(f'{self.p} skaled_container - ok')
- return initial_status
-
- @monitor_block
- def restart_skaled_container(self) -> bool:
- initial_status = True
- if not is_container_exists(self.name, dutils=self.dutils):
- logger.info(f'sChain {self.name}: container doesn\'t exits, running container...')
- initial_status = self.skaled_container()
- else:
- restart_container(SCHAIN_CONTAINER, self.schain, dutils=self.dutils)
- return initial_status
-
- @monitor_block
- def reloaded_skaled_container(self) -> bool:
- logger.info('Starting skaled with reloaded configuration')
- initial_status = True
- if is_container_exists(self.name, dutils=self.dutils):
- remove_schain_container(self.name, dutils=self.dutils)
- else:
- logger.warning(f'sChain {self.name}: container doesn\'t exists')
- initial_status = self.skaled_container()
- return initial_status
-
- @monitor_block
- def recreated_schain_containers(self) -> bool:
- """ Recreates both schain and IMA containers """
- logger.info('Restart skaled and IMA from scratch')
- initial_status = True
- # Remove IMA -> skaled, start skaled -> IMA
- if is_container_exists(self.name, container_type=IMA_CONTAINER, dutils=self.dutils):
- initial_status = False
- remove_ima_container(self.name, dutils=self.dutils)
- if is_container_exists(self.name, container_type=SCHAIN_CONTAINER, dutils=self.dutils):
- initial_status = False
- remove_schain_container(self.name, dutils=self.dutils)
- self.skaled_container()
- self.ima_container()
- return initial_status
-
- @monitor_block
- def skaled_rpc(self) -> bool:
- initial_status = self.checks.rpc.status
- if not initial_status:
- self.display_skaled_logs()
- handle_failed_schain_rpc(
- self.schain,
- schain_record=self.schain_record,
- skaled_status=self.skaled_status,
- dutils=self.dutils
- )
- else:
- self.schain_record.set_failed_rpc_count(0)
- logger.info(f'{self.p} rpc - ok')
- return initial_status
-
- @monitor_block
- def ima_container(self) -> bool:
- initial_status = self.checks.ima_container.status
- migration_ts = get_ima_migration_ts(self.name)
- logger.debug('Migration time for %s IMA - %d', self.name, migration_ts)
- if not initial_status:
- pull_new_image(type=IMA_CONTAINER, dutils=self.dutils)
- monitor_ima_container(
- self.schain,
- self.ima_data,
- migration_ts=migration_ts,
- dutils=self.dutils
- )
- else:
- logger.info(f'{self.p} ima_container - ok')
- return initial_status
-
- @monitor_block
- def cleanup_schain_docker_entity(self) -> bool:
- remove_schain_container(self.name, dutils=self.dutils)
- time.sleep(SCHAIN_CLEANUP_TIMEOUT)
- remove_schain_volume(self.name, dutils=self.dutils)
- return True
-
- def log_executed_blocks(self) -> None:
- logger.info(arguments_list_string(
- self.executed_blocks, f'Finished monitor runner - {self.name}'))
-
- def display_skaled_logs(self) -> None:
- if is_container_exists(self.name, dutils=self.dutils):
- container_name = get_container_name(SCHAIN_CONTAINER, self.name)
- self.dutils.display_container_logs(container_name)
- else:
- logger.warning(f'sChain {self.name}: container doesn\'t exists, could not show logs')
-
- monitor_runner = staticmethod(monitor_runner)
- monitor_block = staticmethod(monitor_block)
diff --git a/core/schains/monitor/config_monitor.py b/core/schains/monitor/config_monitor.py
new file mode 100644
index 000000000..7a85f2694
--- /dev/null
+++ b/core/schains/monitor/config_monitor.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import logging
+from abc import abstractmethod
+
+from core.schains.checks import ConfigChecks
+from core.schains.monitor.base_monitor import IMonitor
+from core.schains.monitor.action import ConfigActionManager
+
+
+logger = logging.getLogger(__name__)
+
+
+class BaseConfigMonitor(IMonitor):
+ def __init__(
+ self,
+ action_manager: ConfigActionManager,
+ checks: ConfigChecks
+ ) -> None:
+ self.am = action_manager
+ self.checks = checks
+
+ @abstractmethod
+ def execute(self) -> None:
+ pass
+
+ def run(self):
+ typename = type(self).__name__
+ logger.info('Config monitor type %s starting', typename)
+ self.am._upd_last_seen()
+ self.execute()
+ self.am.log_executed_blocks()
+ self.am._upd_last_seen()
+ logger.info('Config monitor type %s finished', typename)
+
+
+class RegularConfigMonitor(BaseConfigMonitor):
+ def execute(self) -> None:
+ if not self.checks.config_dir:
+ self.am.config_dir()
+ if not self.checks.dkg:
+ self.am.dkg()
+ if not self.checks.external_state:
+ self.am.external_state()
+ if not self.checks.upstream_config:
+ self.am.upstream_config()
+ self.am.reset_config_record()
diff --git a/core/schains/monitor/containers.py b/core/schains/monitor/containers.py
index 8264280b8..66ee3b488 100644
--- a/core/schains/monitor/containers.py
+++ b/core/schains/monitor/containers.py
@@ -50,8 +50,9 @@ def monitor_schain_container(
schain,
schain_record,
skaled_status,
- public_key=None,
+ download_snapshot=False,
start_ts=None,
+ abort_on_exit: bool = True,
dutils=None
) -> None:
dutils = dutils or DockerUtils()
@@ -62,33 +63,35 @@ def monitor_schain_container(
logger.error(f'Data volume for sChain {schain_name} does not exist')
return
+ if skaled_status.exit_time_reached and abort_on_exit:
+ logger.info(
+ f'{schain_name} - Skipping container monitor: exit time reached')
+ skaled_status.log()
+ schain_record.reset_failed_counters()
+ return
+
if not is_container_exists(schain_name, dutils=dutils):
logger.info(f'SChain {schain_name}: container doesn\'t exits')
run_schain_container(
schain=schain,
- public_key=public_key,
+ download_snapshot=download_snapshot,
start_ts=start_ts,
snapshot_from=schain_record.snapshot_from,
dutils=dutils
)
- schain_record.reset_failed_conunters()
- return
-
- if skaled_status.exit_time_reached:
- logger.info(f'{schain_name} - Skipping container monitor: exit time reached')
- skaled_status.log()
- schain_record.reset_failed_conunters()
+ schain_record.reset_failed_counters()
return
if skaled_status.clear_data_dir and skaled_status.start_from_snapshot:
- logger.info(f'{schain_name} - Skipping container monitor: sChain should be repaired')
+ logger.info(
+ f'{schain_name} - Skipping container monitor: sChain should be repaired')
skaled_status.log()
- schain_record.reset_failed_conunters()
+ schain_record.reset_failed_counters()
return
if is_schain_container_failed(schain_name, dutils=dutils):
if schain_record.restart_count < MAX_SCHAIN_RESTART_COUNT:
- logger.info(f'SChain {schain_name}: restarting container')
+ logger.info('sChain %s: restarting container', schain_name)
restart_container(SCHAIN_CONTAINER, schain, dutils=dutils)
schain_record.set_restart_count(schain_record.restart_count + 1)
schain_record.set_failed_rpc_count(0)
@@ -98,6 +101,8 @@ def monitor_schain_container(
schain_name,
MAX_SCHAIN_RESTART_COUNT
)
+ else:
+ schain_record.set_restart_count(0)
def monitor_ima_container(
@@ -118,23 +123,27 @@ def monitor_ima_container(
copy_schain_ima_abi(schain_name)
- container_exists = is_container_exists(schain_name, container_type=IMA_CONTAINER, dutils=dutils)
+ container_exists = is_container_exists(
+ schain_name, container_type=IMA_CONTAINER, dutils=dutils)
container_image = get_container_image(schain_name, IMA_CONTAINER, dutils)
new_image = get_image_name(type=IMA_CONTAINER, new=True)
expected_image = get_image_name(type=IMA_CONTAINER)
- logger.debug('%s IMA image %s, expected %s', schain_name, container_image, expected_image)
+ logger.debug('%s IMA image %s, expected %s', schain_name,
+ container_image, expected_image)
if time.time() > migration_ts:
logger.debug('%s IMA migration time passed', schain_name)
expected_image = new_image
if container_exists and expected_image != container_image:
- logger.info('%s Removing old container as part of IMA migration', schain_name)
+ logger.info(
+ '%s Removing old container as part of IMA migration', schain_name)
remove_container(schain_name, IMA_CONTAINER, dutils)
container_exists = False
if not container_exists:
- logger.info('%s No IMA container, creating, image %s', schain_name, expected_image)
+ logger.info('%s No IMA container, creating, image %s',
+ schain_name, expected_image)
run_ima_container(
schain,
ima_data.chain_id,
@@ -142,4 +151,5 @@ def monitor_ima_container(
dutils=dutils
)
else:
- logger.debug('sChain %s: IMA container exists, but not running, skipping', schain_name)
+ logger.debug(
+ 'sChain %s: IMA container exists, but not running, skipping', schain_name)
diff --git a/core/schains/monitor/main.py b/core/schains/monitor/main.py
index 55e58557e..78ab26ba4 100644
--- a/core/schains/monitor/main.py
+++ b/core/schains/monitor/main.py
@@ -17,177 +17,250 @@
# You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
+import functools
import time
import random
import logging
+from typing import Dict
+from concurrent.futures import Future, ThreadPoolExecutor
from importlib import reload
+from typing import List, Optional
-from web3._utils import request
+from skale import Skale, SkaleIma
+from web3._utils import request as web3_request
+from core.node import get_skale_node_version
from core.node_config import NodeConfig
-from core.schains.checks import SChainChecks
+from core.schains.checks import (
+ ConfigChecks,
+ get_api_checks_status,
+ TG_ALLOWED_CHECKS,
+ SkaledChecks
+)
+from core.schains.config.file_manager import ConfigFileManager
from core.schains.firewall import get_default_rule_controller
-from core.schains.ima import ImaData
+from core.schains.firewall.utils import get_sync_agent_ranges
from core.schains.monitor import (
- BaseMonitor,
- BackupMonitor,
- PostRotationMonitor,
- RegularMonitor,
- RepairMonitor,
- RotationMonitor,
- ReloadMonitor
+ get_skaled_monitor,
+ RegularConfigMonitor
)
-from core.schains.firewall.utils import get_sync_agent_ranges
-from core.schains.skaled_status import init_skaled_status, SkaledStatus
-
+from core.schains.monitor.action import ConfigActionManager, SkaledActionManager
+from core.schains.external_config import ExternalConfig, ExternalState
+from core.schains.task import keep_tasks_running, Task
+from core.schains.skaled_status import get_skaled_status
from tools.docker_utils import DockerUtils
-from tools.configs import BACKUP_RUN
from tools.configs.ima import DISABLE_IMA
+from tools.notifications.messages import notify_checks
from tools.helper import is_node_part_of_chain
+from web.models.schain import SChainRecord
-from web.models.schain import upsert_schain_record, SChainRecord
+MIN_SCHAIN_MONITOR_SLEEP_INTERVAL = 20
+MAX_SCHAIN_MONITOR_SLEEP_INTERVAL = 40
-MIN_SCHAIN_MONITOR_SLEEP_INTERVAL = 90
-MAX_SCHAIN_MONITOR_SLEEP_INTERVAL = 180
-
+SKALED_PIPELINE_SLEEP = 2
+CONFIG_PIPELINE_SLEEP = 3
logger = logging.getLogger(__name__)
-def get_log_prefix(name):
- return f'schain: {name} -'
-
-
-def _is_backup_mode(schain_record: SChainRecord) -> bool:
- return schain_record.first_run and not schain_record.new_schain and BACKUP_RUN
-
-
-def _is_repair_mode(
- schain_record: SChainRecord,
- checks: SChainChecks,
- skaled_status: SkaledStatus
-) -> bool:
- return schain_record.repair_mode or _is_skaled_repair_status(checks, skaled_status)
-
-
-def _is_rotation_mode(is_rotation_active: bool) -> bool:
- return is_rotation_active
-
-
-def _is_post_rotation_mode(checks: SChainChecks, skaled_status: SkaledStatus) -> bool:
- skaled_status.log()
- return not checks.skaled_container.status and skaled_status.exit_time_reached
-
-
-def _is_reload_mode(schain_record: SChainRecord) -> bool:
- return schain_record.needs_reload
-
-
-def _is_skaled_repair_status(checks: SChainChecks, skaled_status: SkaledStatus) -> bool:
- skaled_status.log()
- needs_repair = skaled_status.clear_data_dir and skaled_status.start_from_snapshot
- return not checks.skaled_container.status and needs_repair
-
-
-def _is_skaled_reload_status(checks: SChainChecks, skaled_status: SkaledStatus) -> bool:
- skaled_status.log()
- needs_reload = skaled_status.start_again and not skaled_status.start_from_snapshot
- return not checks.skaled_container.status and needs_reload
-
-
-def get_monitor_type(
- schain_record: SChainRecord,
- checks: SChainChecks,
- is_rotation_active: bool,
- skaled_status: SkaledStatus
- ) -> BaseMonitor:
- if _is_backup_mode(schain_record):
- return BackupMonitor
- if _is_repair_mode(schain_record, checks, skaled_status):
- return RepairMonitor
- if _is_rotation_mode(is_rotation_active):
- return RotationMonitor
- if _is_post_rotation_mode(checks, skaled_status):
- return PostRotationMonitor
- if _is_reload_mode(schain_record):
- return ReloadMonitor
- return RegularMonitor
-
-
-def run_monitor_for_schain(skale, skale_ima, node_config: NodeConfig, schain, dutils=None,
- once=False):
- p = get_log_prefix(schain["name"])
-
- def post_monitor_sleep():
- schain_monitor_sleep = random.randint(
- MIN_SCHAIN_MONITOR_SLEEP_INTERVAL,
- MAX_SCHAIN_MONITOR_SLEEP_INTERVAL
- )
- logger.info(f'{p} monitor completed, sleeping for {schain_monitor_sleep}s...')
- time.sleep(schain_monitor_sleep)
-
- while True:
- try:
- logger.info(f'{p} monitor created')
- reload(request) # fix for web3py multiprocessing issue (see SKALE-4251)
-
- name = schain["name"]
- dutils = dutils or DockerUtils()
-
- is_rotation_active = skale.node_rotation.is_rotation_active(name)
-
- if not is_node_part_of_chain(skale, name, node_config.id) and not is_rotation_active:
- logger.warning(f'{p} NOT ON NODE ({node_config.id}), finising process...')
- return True
-
- ima_linked = not DISABLE_IMA and skale_ima.linker.has_schain(name)
- rotation_data = skale.node_rotation.get_rotation(name)
-
- sync_agent_ranges = get_sync_agent_ranges(skale)
-
- rc = get_default_rule_controller(
- name=name,
- sync_agent_ranges=sync_agent_ranges
- )
- schain_record = upsert_schain_record(name)
- checks = SChainChecks(
- name,
- node_config.id,
- schain_record=schain_record,
- rule_controller=rc,
- rotation_id=rotation_data['rotation_id'],
- ima_linked=ima_linked,
- dutils=dutils
- )
-
- ima_data = ImaData(
- linked=ima_linked,
- chain_id=skale_ima.web3.eth.chainId
- )
- skaled_status = init_skaled_status(name)
-
- monitor_class = get_monitor_type(
- schain_record,
- checks,
- is_rotation_active,
- skaled_status
- )
- monitor = monitor_class(
- skale=skale,
- ima_data=ima_data,
- schain=schain,
- node_config=node_config,
- rotation_data=rotation_data,
- checks=checks,
- rule_controller=rc
- )
- monitor.run()
- if once:
- return True
- post_monitor_sleep()
- except Exception:
- logger.exception(f'{p} monitor failed')
- if once:
- return False
- post_monitor_sleep()
+def run_config_pipeline(
+ skale: Skale,
+ skale_ima: SkaleIma,
+ schain: Dict,
+ node_config: NodeConfig,
+ stream_version: str
+) -> None:
+ name = schain['name']
+ schain_record = SChainRecord.get_by_name(name)
+ rotation_data = skale.node_rotation.get_rotation(name)
+ allowed_ranges = get_sync_agent_ranges(skale)
+ ima_linked = not DISABLE_IMA and skale_ima.linker.has_schain(name)
+
+ estate = ExternalState(
+ ima_linked=ima_linked,
+ chain_id=skale_ima.web3.eth.chain_id,
+ ranges=allowed_ranges
+ )
+ econfig = ExternalConfig(name)
+ config_checks = ConfigChecks(
+ schain_name=name,
+ node_id=node_config.id,
+ schain_record=schain_record,
+ stream_version=stream_version,
+ rotation_id=rotation_data['rotation_id'],
+ econfig=econfig,
+ estate=estate
+ )
+
+ config_am = ConfigActionManager(
+ skale=skale,
+ schain=schain,
+ node_config=node_config,
+ rotation_data=rotation_data,
+ stream_version=stream_version,
+ checks=config_checks,
+ estate=estate,
+ econfig=econfig
+ )
+
+ status = config_checks.get_all(log=False)
+ logger.info('Config checks: %s', status)
+ mon = RegularConfigMonitor(config_am, config_checks)
+ mon.run()
+
+
+def run_skaled_pipeline(
+ skale: Skale,
+ schain: Dict,
+ node_config: NodeConfig,
+ dutils: DockerUtils
+) -> None:
+ name = schain['name']
+ schain_record = SChainRecord.get_by_name(name)
+
+ dutils = dutils or DockerUtils()
+
+ rc = get_default_rule_controller(name=name)
+ skaled_checks = SkaledChecks(
+ schain_name=schain['name'],
+ schain_record=schain_record,
+ rule_controller=rc,
+ dutils=dutils
+ )
+
+ skaled_status = get_skaled_status(name)
+
+ skaled_am = SkaledActionManager(
+ schain=schain,
+ rule_controller=rc,
+ checks=skaled_checks,
+ node_config=node_config,
+ econfig=ExternalConfig(name),
+ dutils=dutils
+ )
+ status = skaled_checks.get_all(log=False)
+ api_status = get_api_checks_status(
+ status=status, allowed=TG_ALLOWED_CHECKS)
+ notify_checks(name, node_config.all(), api_status)
+
+ logger.info('Skaled status: %s', status)
+
+ logger.info('Upstream config %s', skaled_am.upstream_config_path)
+ mon = get_skaled_monitor(
+ action_manager=skaled_am,
+ status=status,
+ schain_record=schain_record,
+ skaled_status=skaled_status
+ )
+ mon(skaled_am, skaled_checks).run()
+
+
+def post_monitor_sleep():
+ schain_monitor_sleep = random.randint(
+ MIN_SCHAIN_MONITOR_SLEEP_INTERVAL,
+ MAX_SCHAIN_MONITOR_SLEEP_INTERVAL
+ )
+ logger.info('Monitor iteration completed, sleeping for %d',
+ schain_monitor_sleep)
+ time.sleep(schain_monitor_sleep)
+
+
+def create_and_execute_tasks(
+ skale,
+ schain,
+ node_config: NodeConfig,
+ skale_ima: SkaleIma,
+ stream_version,
+ schain_record,
+ executor,
+ futures,
+ dutils
+):
+ reload(web3_request)
+ name = schain['name']
+
+ is_rotation_active = skale.node_rotation.is_rotation_active(name)
+
+ leaving_chain = not is_node_part_of_chain(skale, name, node_config.id)
+ if leaving_chain and not is_rotation_active:
+ logger.info('Not on node (%d), finishing process', node_config.id)
+ return True
+
+ logger.info(
+ 'sync_config_run %s, config_version %s, stream_version %s',
+ schain_record.sync_config_run, schain_record.config_version, stream_version
+ )
+ tasks = []
+ if not leaving_chain:
+ logger.info('Adding config task to the pool')
+ tasks.append(
+ Task(
+ f'{name}-config',
+ functools.partial(
+ run_config_pipeline,
+ skale=skale,
+ skale_ima=skale_ima,
+ schain=schain,
+ node_config=node_config,
+ stream_version=stream_version
+ ),
+ sleep=CONFIG_PIPELINE_SLEEP
+ ))
+ if schain_record.config_version != stream_version or \
+ (schain_record.sync_config_run and schain_record.first_run):
+ ConfigFileManager(name).remove_skaled_config()
+ else:
+ logger.info('Adding skaled task to the pool')
+ tasks.append(
+ Task(
+ f'{name}-skaled',
+ functools.partial(
+ run_skaled_pipeline,
+ skale=skale,
+ schain=schain,
+ node_config=node_config,
+ dutils=dutils
+ ),
+ sleep=SKALED_PIPELINE_SLEEP
+ ))
+
+ if len(tasks) == 0:
+ logger.warning('No tasks to run')
+ keep_tasks_running(executor, tasks, futures)
+
+
+def run_monitor_for_schain(
+ skale,
+ skale_ima,
+ node_config: NodeConfig,
+ schain,
+ dutils=None,
+ once=False
+):
+ stream_version = get_skale_node_version()
+ tasks_number = 2
+ with ThreadPoolExecutor(max_workers=tasks_number, thread_name_prefix='T') as executor:
+ futures: List[Optional[Future]] = [None for i in range(tasks_number)]
+ while True:
+ schain_record = SChainRecord.get_by_name(schain['name'])
+ try:
+ create_and_execute_tasks(
+ skale,
+ schain,
+ node_config,
+ skale_ima,
+ stream_version,
+ schain_record,
+ executor,
+ futures,
+ dutils
+ )
+ if once:
+ return True
+ post_monitor_sleep()
+ except Exception:
+ logger.exception('Monitor iteration failed')
+ if once:
+ return False
+ post_monitor_sleep()
diff --git a/core/schains/monitor/regular_monitor.py b/core/schains/monitor/regular_monitor.py
deleted file mode 100644
index b92a812ad..000000000
--- a/core/schains/monitor/regular_monitor.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of SKALE Admin
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import logging
-from core.schains.monitor.base_monitor import BaseMonitor
-
-
-logger = logging.getLogger(__name__)
-
-
-class RegularMonitor(BaseMonitor):
- @BaseMonitor.monitor_runner
- def run(self):
- self.config_dir()
- self.dkg()
- self.config()
- self.volume()
- self.firewall_rules()
- self.skaled_container()
- self.skaled_rpc()
- self.ima_container()
diff --git a/core/schains/monitor/reload_monitor.py b/core/schains/monitor/reload_monitor.py
deleted file mode 100644
index 5955ff84b..000000000
--- a/core/schains/monitor/reload_monitor.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of SKALE Admin
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import logging
-
-from core.schains.monitor import BaseMonitor
-
-logger = logging.getLogger(__name__)
-
-
-class ReloadMonitor(BaseMonitor):
- """
- ReloadMonitor is executed when new SSL certificates were uploaded or when reload is requested
- """
- @BaseMonitor.monitor_runner
- def run(self):
- logger.info(
- '%s. Reload requested. Going to restart sChain container',
- self.p
- )
- self.reloaded_skaled_container()
- record = self.schain_record
- record.set_restart_count(0)
- record.set_failed_rpc_count(0)
- record.set_needs_reload(False)
diff --git a/core/schains/monitor/repair_monitor.py b/core/schains/monitor/repair_monitor.py
deleted file mode 100644
index a700e694d..000000000
--- a/core/schains/monitor/repair_monitor.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of SKALE Admin
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import logging
-from core.schains.monitor.base_monitor import BaseMonitor
-from tools.notifications.messages import notify_repair_mode
-from web.models.schain import switch_off_repair_mode
-
-logger = logging.getLogger(__name__)
-
-
-class RepairMonitor(BaseMonitor):
- """
- RepairMonitor could be executed for the sChain in 2 cases:
- 1. Repair mode was toggled by node owner manually
- 2. Wrong exit code on skaled container (currently only 200 exit code is handled)
-
- In this mode container and volume are removed and replaced with a new ones, in a sync mode.
- """
-
- def notify_repair_mode(self) -> None:
- notify_repair_mode(
- self.node_config.all(),
- self.name
- )
-
- def disable_repair_mode(self) -> None:
- switch_off_repair_mode(self.name)
-
- @BaseMonitor.monitor_runner
- def run(self):
- logger.warning(f'REPAIR MODE was toggled - \
-repair_mode: {self.schain_record.repair_mode}, exit_code_ok: {self.checks.exit_code_ok.status}')
- self.notify_repair_mode()
- self.cleanup_schain_docker_entity()
- self.volume()
- self.skaled_container(download_snapshot=True)
- self.skaled_rpc()
- self.disable_repair_mode()
diff --git a/core/schains/monitor/rotation_monitor.py b/core/schains/monitor/rotation_monitor.py
deleted file mode 100644
index 3ee5edc39..000000000
--- a/core/schains/monitor/rotation_monitor.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of SKALE Admin
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-import logging
-
-from core.schains.monitor.base_monitor import BaseMonitor
-from core.schains.rotation import set_rotation_for_schain
-from skale.schain_config.rotation_history import get_previous_schain_groups, get_new_nodes_list
-
-logger = logging.getLogger(__name__)
-
-
-class RotationMonitor(BaseMonitor):
- """
- RotationMonitor could be executed for the sChain when rotation is in progress for this chain.
- In this monitor mode there are 3 possible sub-modes:
-
- 1. New node - when current node was added to the existing group
- 2. Leaving node - when current node was removed from the existing group
- 3. Staying node - when current node staying in the group
- """
-
- def _is_new_rotation_node(self):
- return self.rotation_data['new_node'] == self.node_config.id
-
- def _is_new_node(self) -> bool:
- """
- New node monitor runs in 2 cases during rotation:
- 1. When the current node is marked as a new node
- 2. When the current node doesn't have SKALE chain config file created
- """
- if self._is_new_rotation_node():
- logger.info(f'{self.p} current node is the new node in this rotation')
- return True
- node_groups = get_previous_schain_groups(
- skale=self.skale,
- schain_name=self.name,
- leaving_node_id=self.rotation_data['leaving_node'],
- include_keys=False
- )
- new_nodes = get_new_nodes_list(
- skale=self.skale,
- name=self.name,
- node_groups=node_groups
- )
- logger.info(f'{self.p} new nodes: {new_nodes}, current node: {self.node_config.id}')
- if self.node_config.id in new_nodes:
- logger.info(f'{self.p} current node is one of the new nodes in this rotation')
- return True
- return False
-
- def _is_leaving_node(self) -> bool:
- return self.rotation_data['leaving_node'] == self.node_config.id
-
- def rotation_request(self) -> None:
- set_rotation_for_schain(self.name, self.finish_ts)
-
- def new_node(self) -> None:
- self.config_dir()
- self.dkg()
- self.config()
- self.volume()
- self.firewall_rules()
- self.skaled_container(download_snapshot=True, delay_start=True)
- self.ima_container()
-
- def leaving_node(self) -> None:
- self.firewall_rules()
- self.skaled_container()
- self.skaled_rpc()
- self.ima_container()
- self.rotation_request()
-
- def staying_node(self) -> None:
- self.firewall_rules()
- self.skaled_container()
- self.skaled_rpc()
- self.ima_container()
- self.dkg()
- self.rotation_request()
-
- def get_rotation_mode_func(self):
- if self._is_leaving_node():
- return self.leaving_node
- if self._is_new_node():
- return self.new_node
- return self.staying_node
-
- @BaseMonitor.monitor_runner
- def run(self):
- rotation_mode_func = self.get_rotation_mode_func()
- logger.info(
- f'sChain: {self.name} running {type(self).__name__} '
- f'type: {rotation_mode_func}'
- )
- return rotation_mode_func()
diff --git a/core/schains/monitor/skaled_monitor.py b/core/schains/monitor/skaled_monitor.py
new file mode 100644
index 000000000..7909f4a77
--- /dev/null
+++ b/core/schains/monitor/skaled_monitor.py
@@ -0,0 +1,291 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import logging
+import time
+from abc import abstractmethod
+from typing import Dict, Optional, Type
+
+from core.schains.monitor.base_monitor import IMonitor
+from core.schains.checks import SkaledChecks
+from core.schains.monitor.action import SkaledActionManager
+from core.schains.config.main import get_number_of_secret_shares
+from core.schains.skaled_status import SkaledStatus
+from web.models.schain import SChainRecord
+
+
+logger = logging.getLogger(__name__)
+
+
+class BaseSkaledMonitor(IMonitor):
+ def __init__(
+ self,
+ action_manager: SkaledActionManager,
+ checks: SkaledChecks
+ ) -> None:
+ self.am = action_manager
+ self.checks = checks
+
+ @abstractmethod
+ def execute(self) -> None:
+ pass
+
+ def run(self):
+ typename = type(self).__name__
+ logger.info('Skaled monitor type %s starting', typename)
+ self.am._upd_last_seen()
+ self.execute()
+ self.am._upd_schain_record()
+ self.am.log_executed_blocks()
+ self.am._upd_last_seen()
+ logger.info('Skaled monitor type %s finished', typename)
+
+
+class RegularSkaledMonitor(BaseSkaledMonitor):
+
+ def execute(self) -> None:
+ if not self.checks.firewall_rules:
+ self.am.firewall_rules()
+ if not self.checks.volume:
+ self.am.volume()
+ if not self.checks.skaled_container:
+ self.am.skaled_container()
+ else:
+ self.am.reset_restart_counter()
+ if not self.checks.rpc:
+ self.am.skaled_rpc()
+ if not self.checks.ima_container:
+ self.am.ima_container()
+
+
+class RepairSkaledMonitor(BaseSkaledMonitor):
+    """
+    When node-cli or skaled requests repair mode,
+    remove the data volume and re-download the snapshot.
+    """
+
+    def execute(self) -> None:
+        logger.warning(
+            'Repair mode execution, record: %s, exit_code_ok: %s',
+            self.checks.schain_record.repair_mode,
+            self.checks.exit_code_ok.status
+        )
+        self.am.notify_repair_mode()
+        self.am.cleanup_schain_docker_entity()
+        if not self.checks.firewall_rules:
+            self.am.firewall_rules()
+        if not self.checks.volume:
+            self.am.volume()
+        if not self.checks.skaled_container:
+            self.am.skaled_container(download_snapshot=True)
+        else:
+            # Was reset_restart_count(); every other monitor calls
+            # reset_restart_counter() — unify on the existing method name.
+            self.am.reset_restart_counter()
+        self.am.disable_repair_mode()
+
+
+class BackupSkaledMonitor(BaseSkaledMonitor):
+ """
+    When skaled monitor runs after backup for the first time -
+ download snapshot
+ """
+
+ def execute(self) -> None:
+ if not self.checks.volume:
+ self.am.volume()
+ if not self.checks.firewall_rules:
+ self.am.firewall_rules()
+ if not self.checks.skaled_container:
+ self.am.skaled_container(download_snapshot=True)
+ else:
+ self.am.reset_restart_counter()
+ if not self.checks.ima_container:
+ self.am.ima_container()
+ self.am.disable_backup_run()
+
+
+class RecreateSkaledMonitor(BaseSkaledMonitor):
+ """
+    When recreate is requested from node-cli (currently only for new SSL certs) -
+ safely remove skaled container and start again
+ """
+
+ def execute(self) -> None:
+ logger.info('Reload requested. Recreating sChain container')
+ if not self.checks.volume:
+ self.am.volume()
+ self.am.reloaded_skaled_container()
+
+
+class UpdateConfigSkaledMonitor(BaseSkaledMonitor):
+ """
+    If config is outdated, the skaled container has exited and ExitTimeReached is true -
+ sync config with upstream and restart skaled container
+ """
+
+ def execute(self) -> None:
+ if not self.checks.config_updated:
+ self.am.update_config()
+ if not self.checks.firewall_rules:
+ self.am.firewall_rules()
+ if not self.checks.volume:
+ self.am.volume()
+ self.am.recreated_schain_containers(abort_on_exit=False)
+
+
+class NewConfigSkaledMonitor(BaseSkaledMonitor):
+ """
+    When config is outdated, request setExitTime with the latest finish_ts from config
+ """
+
+ def execute(self):
+ if not self.checks.firewall_rules:
+ self.am.firewall_rules()
+ if not self.checks.volume:
+ self.am.volume()
+ if not self.checks.skaled_container:
+ self.am.skaled_container()
+ else:
+ self.am.reset_restart_counter()
+ if not self.checks.rpc:
+ self.am.skaled_rpc()
+ if not self.checks.ima_container:
+ self.am.ima_container()
+ self.am.send_exit_request()
+
+
+class NoConfigSkaledMonitor(BaseSkaledMonitor):
+ """
+ When there is no skaled config - sync with upstream
+    assuming it exists
+ """
+
+ def execute(self):
+ if self.checks.upstream_exists:
+ logger.info('Creating skaled config')
+ self.am.update_config()
+ else:
+ logger.debug('Waiting for upstream config')
+
+
+class NewNodeSkaledMonitor(BaseSkaledMonitor):
+ """
+ When finish_ts is in the future and there is only one secret key share -
+    download snapshot and schedule start after finish_ts
+ """
+
+ def execute(self):
+ if not self.checks.volume:
+ self.am.volume()
+ if not self.checks.firewall_rules:
+ self.am.firewall_rules()
+ if not self.checks.skaled_container:
+ self.am.skaled_container(
+ download_snapshot=True,
+ start_ts=self.am.finish_ts
+ )
+ else:
+ self.am.reset_restart_counter()
+ if not self.checks.ima_container:
+ self.am.ima_container()
+
+
+def is_backup_mode(schain_record: SChainRecord) -> bool:
+ return schain_record.backup_run and not schain_record.new_schain
+
+
+def is_repair_mode(
+ schain_record: SChainRecord,
+ status: Dict,
+ skaled_status: Optional[SkaledStatus]
+) -> bool:
+ return schain_record.repair_mode or is_skaled_repair_status(status, skaled_status)
+
+
+def is_new_config_mode(
+ status: Dict,
+ finish_ts: Optional[int]
+) -> bool:
+ ts = int(time.time())
+ if finish_ts is None:
+ return False
+ return finish_ts > ts and status['config'] and not status['config_updated']
+
+
+def is_config_update_time(
+ status: Dict,
+ skaled_status: Optional[SkaledStatus]
+) -> bool:
+ if not skaled_status:
+ return False
+ return not status['config_updated'] and \
+ not status['skaled_container'] and \
+ skaled_status.exit_time_reached
+
+
+def is_recreate_mode(schain_record: SChainRecord) -> bool:
+ return schain_record.needs_reload
+
+
+def is_new_node_mode(schain_record: SChainRecord, finish_ts: Optional[int]) -> bool:
+ ts = int(time.time())
+ secret_shares_number = get_number_of_secret_shares(schain_record.name)
+ if finish_ts is None:
+ return False
+ return finish_ts > ts and secret_shares_number == 1
+
+
+def is_skaled_repair_status(status: Dict, skaled_status: Optional[SkaledStatus]) -> bool:
+ if skaled_status is None:
+ return False
+ skaled_status.log()
+ needs_repair = skaled_status.clear_data_dir and skaled_status.start_from_snapshot
+ return not status['skaled_container'] and needs_repair
+
+
+def no_config(status: Dict) -> bool:
+ return not status['config']
+
+
+def get_skaled_monitor(
+ action_manager: SkaledActionManager,
+ status: Dict,
+ schain_record: SChainRecord,
+ skaled_status: SkaledStatus
+) -> Type[BaseSkaledMonitor]:
+ logger.info('Choosing skaled monitor')
+ if skaled_status:
+ skaled_status.log()
+
+ mon_type: Type[BaseSkaledMonitor] = RegularSkaledMonitor
+ if no_config(status):
+ mon_type = NoConfigSkaledMonitor
+ elif is_backup_mode(schain_record):
+ mon_type = BackupSkaledMonitor
+ elif is_repair_mode(schain_record, status, skaled_status):
+ mon_type = RepairSkaledMonitor
+ elif is_recreate_mode(schain_record):
+ mon_type = RecreateSkaledMonitor
+ elif is_new_node_mode(schain_record, action_manager.finish_ts):
+ mon_type = NewNodeSkaledMonitor
+ elif is_config_update_time(status, skaled_status):
+ mon_type = UpdateConfigSkaledMonitor
+ elif is_new_config_mode(status, action_manager.upstream_finish_ts):
+ mon_type = NewConfigSkaledMonitor
+
+ return mon_type
diff --git a/core/schains/notifications.py b/core/schains/notifications.py
index 0371595ec..dda6fe9ba 100644
--- a/core/schains/notifications.py
+++ b/core/schains/notifications.py
@@ -31,8 +31,8 @@
def notify_if_not_enough_balance(skale: Skale, node_info: Dict) -> None:
- eth_balance_wei = skale.web3.eth.getBalance(skale.wallet.address)
+ eth_balance_wei = skale.web3.eth.get_balance(skale.wallet.address)
logger.info(f'Node account has {eth_balance_wei} WEI')
- balance_in_skl = skale.web3.fromWei(eth_balance_wei, 'ether')
- required_in_skl = skale.web3.fromWei(REQUIRED_BALANCE_WEI, 'ether')
+ balance_in_skl = skale.web3.from_wei(eth_balance_wei, 'ether')
+ required_in_skl = skale.web3.from_wei(REQUIRED_BALANCE_WEI, 'ether')
notify_balance(node_info, balance_in_skl, required_in_skl)
diff --git a/core/schains/process_manager.py b/core/schains/process_manager.py
index 5b37f49be..2397bed77 100644
--- a/core/schains/process_manager.py
+++ b/core/schains/process_manager.py
@@ -70,18 +70,22 @@ def run_process_manager(skale, skale_ima, node_config):
if not monitor_process_alive:
logger.info(f'{log_prefix} PID {schain_record.monitor_id} is not running, spawning...')
- process = Process(target=run_monitor_for_schain, args=(
- skale,
- skale_ima,
- node_config,
- schain
- ))
+ process = Process(
+ name=schain['name'],
+ target=run_monitor_for_schain,
+ args=(
+ skale,
+ skale_ima,
+ node_config,
+ schain
+ )
+ )
process.start()
schain_record.set_monitor_id(process.ident)
logger.info(f'{log_prefix} Process started: PID = {process.ident}')
else:
logger.info(f'{log_prefix} Process is running: PID = {schain_record.monitor_id}')
- logger.info('Creator procedure finished')
+ logger.info('Process manager procedure finished')
def fetch_schains_to_monitor(skale: Skale, node_id: int) -> list:
diff --git a/core/schains/rotation.py b/core/schains/rotation.py
index dc7976c46..f5b352ac5 100644
--- a/core/schains/rotation.py
+++ b/core/schains/rotation.py
@@ -21,19 +21,20 @@
import logging
import requests
-from core.schains.config.helper import get_skaled_http_address
-
logger = logging.getLogger(__name__)
-def set_rotation_for_schain(schain_name: str, timestamp: int) -> None:
- url = get_skaled_http_address(schain_name)
+class ExitRequestError(Exception):
+ pass
+
+
+def set_rotation_for_schain(url: str, timestamp: int) -> None:
_send_rotation_request(url, timestamp)
def _send_rotation_request(url, timestamp):
- logger.info(f'Send rotation request: {timestamp}')
+ logger.info(f'Sending rotation request: {timestamp}')
headers = {'content-type': 'application/json'}
data = {
'finishTime': timestamp
@@ -50,7 +51,7 @@ def _send_rotation_request(url, timestamp):
headers=headers,
).json()
if response.get('error'):
- raise Exception(response['error']['message'])
+ raise ExitRequestError(response['error']['message'])
def get_schain_public_key(skale, schain_name):
diff --git a/core/schains/runner.py b/core/schains/runner.py
index 73e0b8158..15cf2fa51 100644
--- a/core/schains/runner.py
+++ b/core/schains/runner.py
@@ -172,7 +172,7 @@ def restart_container(
def run_schain_container(
schain,
- public_key=None,
+ download_snapshot=False,
start_ts=None,
dutils=None,
volume_mode=None,
@@ -194,8 +194,8 @@ def run_schain_container(
cmd = get_schain_container_cmd(
schain_name,
- public_key,
start_ts,
+ download_snapshot=download_snapshot,
enable_ssl=enable_ssl,
snapshot_from=snapshot_from
)
diff --git a/core/schains/skaled_status.py b/core/schains/skaled_status.py
index a50e64695..02186a4a9 100644
--- a/core/schains/skaled_status.py
+++ b/core/schains/skaled_status.py
@@ -21,6 +21,7 @@
import json
import logging
from json.decoder import JSONDecodeError
+from typing import Optional
from core.schains.config.directory import skaled_status_filepath
from tools.config_utils import config_getter, log_broken_status_file
@@ -101,3 +102,10 @@ def log(self) -> None:
def init_skaled_status(schain_name) -> SkaledStatus:
status_filepath = skaled_status_filepath(schain_name)
return SkaledStatus(status_filepath)
+
+
+def get_skaled_status(schain_name) -> Optional[SkaledStatus]:
+ status_path = skaled_status_filepath(schain_name)
+ if os.path.isfile(status_path):
+ return SkaledStatus(status_path)
+ return None
diff --git a/core/schains/task.py b/core/schains/task.py
new file mode 100644
index 000000000..b95a8eb92
--- /dev/null
+++ b/core/schains/task.py
@@ -0,0 +1,51 @@
+import logging
+import time
+from concurrent.futures import Future, ThreadPoolExecutor
+from typing import Callable, List, Optional
+
+logger = logging.getLogger(__name__)
+
+
+class Task:
+ def __init__(
+ self,
+ name: str,
+ action: Callable,
+ index: int = 0,
+ sleep: int = 2
+ ) -> None:
+ self.name = name
+ self.index = index
+ self.action = action
+ self.sleep = sleep
+
+ def run(self) -> None:
+ try:
+ self.action()
+ except Exception as e:
+ logger.exception('Task %s failed with %s', self.name, e)
+ logger.info('Sleeping after task execution for %d', self.sleep)
+ time.sleep(self.sleep)
+
+
+def keep_tasks_running(
+ executor: ThreadPoolExecutor,
+ tasks: List[Task],
+ futures: List[Optional[Future]]
+) -> None:
+ for i, task in enumerate(tasks):
+ future = futures[i]
+ if future is not None and not future.running():
+ result = future.result()
+ logger.info('Task %s finished with %s', task.name, result)
+ if future is None or not future.running():
+ logger.info('Running task %s', task.name)
+ futures[i] = executor.submit(task.run)
+
+
+def run_tasks(name: str, tasks: List[Task]) -> None:
+ with ThreadPoolExecutor(max_workers=len(tasks), thread_name_prefix='T') as executor:
+ futures: List[Optional[Future]] = [None for i in range(len(tasks))]
+ while True:
+ keep_tasks_running(executor, tasks, futures)
+ time.sleep(30)
diff --git a/requirements-dev.txt b/requirements-dev.txt
index 63a24367c..6510ec576 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,4 +1,4 @@
-pytest==3.8.1
+pytest==7.1.3
flake8==5.0.4
freezegun==0.3.15
mock==4.0.2
diff --git a/requirements.txt b/requirements.txt
index 393b038ce..68a4bd4ed 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,26 +1,25 @@
peewee==3.9.5
Flask==2.2.5
-Werkzeug==2.2.2
+Werkzeug==2.2.3
gunicorn==20.1.0
Jinja2==3.0.3
-docker==6.1.2
+docker==6.1.3
simple-crypt==4.1.7
pycryptodome==3.12.0
python-iptables==1.0.0
-skale.py==5.8b1
+skale.py==6.0dev5
-ima-predeployed==1.3.5b1
-etherbase-predeployed==1.1.0b1
-marionette-predeployed==2.0.0b0
-multisigwallet-predeployed==1.1.0b0
-predeployed-generator==1.1.0a8
+ima-predeployed==2.0.0b0
+etherbase-predeployed==1.1.0b3
+marionette-predeployed==2.0.0b2
+config-controller-predeployed==1.0.1.dev2
+filestorage-predeployed==1.1.0.dev8
+multisigwallet-predeployed==1.1.0a8
-context-predeployed==1.0.0b0
-filestorage-predeployed==1.1.0b2
-config-controller-predeployed==1.0.1b0
+context-predeployed==1.0.0.dev3
psutil==5.9.3
@@ -34,5 +33,3 @@ cryptography==39.0.1
python-dateutil==2.8.1
python-telegram-bot==12.8
sh==1.14.1
-
-eth-utils==1.10.0
diff --git a/tests/conftest.py b/tests/conftest.py
index ad7009ddd..f6fa67fc8 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -5,10 +5,12 @@
import shutil
import string
import subprocess
+from pathlib import Path
import docker
import pytest
+
from skale import SkaleManager
from skale.wallets import Web3Wallet
from skale.utils.account_tools import generate_account, send_eth
@@ -35,9 +37,10 @@
get_node_ips_from_config,
get_own_ip_from_config
)
-from core.schains.config.directory import skaled_status_filepath
+from core.schains.config.directory import schain_config_dir, skaled_status_filepath
from core.schains.cleaner import remove_schain_container, remove_schain_volume
from core.schains.ima import ImaData
+from core.schains.external_config import ExternalConfig, ExternalState
from core.schains.skaled_status import init_skaled_status, SkaledStatus
from core.schains.config.skale_manager_opts import SkaleManagerOpts
@@ -52,6 +55,7 @@
from web.models.schain import create_tables, SChainRecord
from tests.utils import (
+ ALLOWED_RANGES,
CONFIG_STREAM,
ENDPOINT,
ETH_AMOUNT_PER_NODE,
@@ -280,6 +284,77 @@ def generate_schain_config(schain_name):
"schainID": 1,
"schainName": schain_name,
"schainOwner": "0x3483A10F7d6fDeE0b0C1E9ad39cbCE13BD094b12",
+
+
+ "nodeGroups": {
+ "1": {
+ "rotation": None,
+ "nodes": {
+ "2": [
+ 0,
+ 2,
+ "0xc21d242070e84fe5f8e80f14b8867856b714cf7d1984eaa9eb3f83c2a0a0e291b9b05754d071fbe89a91d4811b9b182d350f706dea6e91205905b86b4764ef9a" # noqa
+ ],
+ "5": [
+ 1,
+ 5,
+ "0xc37b6db727683379d305a4e38532ddeb58c014ebb151662635839edf3f20042bcdaa8e4b1938e8304512c730671aedf310da76315e329be0814709279a45222a" # noqa
+ ],
+ "4": [
+ 2,
+ 4,
+ "0x8b335f65ecf0845d93bc65a340cc2f4b8c49896f5023ecdff7db6f04bc39f9044239f541702ca7ad98c97aa6a7807aa7c41e394262cca0a32847e3c7c187baf5" # noqa
+ ],
+ "3": [
+ 3,
+ 3,
+ "0xf3496966c7fd4a82967d32809267abec49bf5c4cc6d88737cee9b1a436366324d4847127a1220575f4ea6a7661723cd5861c9f8de221405b260511b998a0bbc8" # noqa
+ ]
+ },
+ "finish_ts": None,
+ "bls_public_key": {
+ "blsPublicKey0": "8609115311055863404517113391175862520685049234001839865086978176708009850942", # noqa
+ "blsPublicKey1": "12596903066793884087763787291339131389612748572700005223043813683790087081", # noqa
+ "blsPublicKey2": "20949401227653007081557504259342598891084201308661070577835940778932311075846", # noqa
+ "blsPublicKey3": "5476329286206272760147989277520100256618500160343291262709092037265666120930" # noqa
+ }
+ },
+ "0": {
+ "rotation": {
+ "leaving_node_id": 1,
+ "new_node_id": 5
+ },
+ "nodes": {
+ "2": [
+ 0,
+ 2,
+ "0xc21d242070e84fe5f8e80f14b8867856b714cf7d1984eaa9eb3f83c2a0a0e291b9b05754d071fbe89a91d4811b9b182d350f706dea6e91205905b86b4764ef9a" # noqa
+ ],
+ "4": [
+ 2,
+ 4,
+ "0x8b335f65ecf0845d93bc65a340cc2f4b8c49896f5023ecdff7db6f04bc39f9044239f541702ca7ad98c97aa6a7807aa7c41e394262cca0a32847e3c7c187baf5" # noqa
+ ],
+ "3": [
+ 3,
+ 3,
+ "0xf3496966c7fd4a82967d32809267abec49bf5c4cc6d88737cee9b1a436366324d4847127a1220575f4ea6a7661723cd5861c9f8de221405b260511b998a0bbc8" # noqa
+ ],
+ "1": [
+ 1,
+ 1,
+ "0x1a857aa4a982ba242c2386febf1eb72dcd1f9669b4237a17878eb836086618af6cda473afa2dfb37c0d2786887397d39bec9601234d933d4384fe38a39b399df" # noqa
+ ]
+ },
+ "finish_ts": 1687180291,
+ "bls_public_key": {
+ "blsPublicKey0": "12452613198400495171048259986807077228209876295033433688114313813034253740478", # noqa
+ "blsPublicKey1": "10490413552821776191285904316985887024952448646239144269897585941191848882433", # noqa
+ "blsPublicKey2": "892041650350974543318836112385472656918171041007469041098688469382831828315", # noqa
+ "blsPublicKey3": "14699659615059580586774988732364564692366017113631037780839594032948908579205" # noqa
+ }
+ }
+ },
"nodes": [
{
"nodeID": 0,
@@ -359,18 +434,44 @@ def _schain_name():
@pytest.fixture
-def schain_config(_schain_name, predeployed_ima):
+def secret_key(_schain_name):
+ schain_dir_path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
+ secret_key_path = os.path.join(schain_dir_path, 'secret_key_0.json')
+ try:
+ pathlib.Path(schain_dir_path).mkdir(parents=True, exist_ok=True)
+ with open(secret_key_path, 'w') as key_file:
+ json.dump(SECRET_KEY, key_file)
+ yield SECRET_KEY
+ finally:
+ rm_schain_dir(_schain_name)
+
+
+@pytest.fixture
+def secret_keys(_schain_name):
+ schain_dir_path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
+ secret_key_path_0 = os.path.join(schain_dir_path, 'secret_key_0.json')
+ secret_key_path_1 = os.path.join(schain_dir_path, 'secret_key_1.json')
+ try:
+ pathlib.Path(schain_dir_path).mkdir(parents=True, exist_ok=True)
+ with open(secret_key_path_0, 'w') as key_file:
+ json.dump(SECRET_KEY, key_file)
+ with open(secret_key_path_1, 'w') as key_file:
+ json.dump(SECRET_KEY, key_file)
+ yield SECRET_KEY
+ finally:
+ rm_schain_dir(_schain_name)
+
+
+@pytest.fixture
+def schain_config(_schain_name, secret_key, predeployed_ima):
schain_dir_path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
- pathlib.Path(schain_dir_path).mkdir(parents=True, exist_ok=True)
config_path = os.path.join(schain_dir_path,
f'schain_{_schain_name}.json')
- secret_key_path = os.path.join(schain_dir_path, 'secret_key_0.json')
- schain_config = generate_schain_config(_schain_name)
- with open(config_path, 'w') as config_file:
- json.dump(schain_config, config_file)
- with open(secret_key_path, 'w') as key_file:
- json.dump(SECRET_KEY, key_file)
try:
+ pathlib.Path(schain_dir_path).mkdir(parents=True, exist_ok=True)
+ schain_config = generate_schain_config(_schain_name)
+ with open(config_path, 'w') as config_file:
+ json.dump(schain_config, config_file)
yield schain_config
finally:
rm_schain_dir(_schain_name)
@@ -418,7 +519,8 @@ def skaled_status_exit_time_reached(_schain_name):
@pytest.fixture
def skaled_status_repair(_schain_name):
- generate_schain_skaled_status_file(_schain_name, clear_data_dir=True, start_from_snapshot=True)
+ generate_schain_skaled_status_file(
+ _schain_name, clear_data_dir=True, start_from_snapshot=True)
try:
yield init_skaled_status(_schain_name)
finally:
@@ -479,12 +581,12 @@ def meta_file():
@pytest.fixture
-def schain_on_contracts(skale, nodes, _schain_name) -> str:
+def schain_on_contracts(skale, nodes, _schain_name):
try:
yield create_schain(
skale,
schain_type=1, # test2 should have 1 index
- random_name=True
+ schain_name=_schain_name
)
finally:
cleanup_nodes_schains(skale)
@@ -517,11 +619,16 @@ def skaled_mock_image(scope='module'):
@pytest.fixture
def cleanup_schain_dirs_before():
- shutil.rmtree(SCHAINS_DIR_PATH)
+ shutil.rmtree(SCHAINS_DIR_PATH, ignore_errors=True)
pathlib.Path(SCHAINS_DIR_PATH).mkdir(parents=True, exist_ok=True)
return
+@pytest.fixture
+def clean_docker(dutils, cleanup_schain_containers, cleanup_ima_containers):
+ pass
+
+
@pytest.fixture
def cleanup_schain_containers(dutils):
try:
@@ -530,6 +637,7 @@ def cleanup_schain_containers(dutils):
containers = dutils.get_all_schain_containers(all=True)
for container in containers:
dutils.safe_rm(container.name, force=True)
+ dutils.safe_rm(container.name.replace('schain', 'ima'), force=True)
@pytest.fixture
@@ -564,7 +672,7 @@ def node_config(skale, nodes):
@pytest.fixture
-def schain_checks(schain_config, schain_db, rule_controller, dutils):
+def schain_checks(schain_config, schain_db, rule_controller, estate, dutils):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
schain_record = SChainRecord.get_by_name(schain_name)
node_id = schain_config['skaleConfig']['sChain']['nodes'][0]['nodeID']
@@ -573,6 +681,8 @@ def schain_checks(schain_config, schain_db, rule_controller, dutils):
node_id,
schain_record=schain_record,
rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
dutils=dutils
)
@@ -589,7 +699,7 @@ def schain_struct(schain_config):
@pytest.fixture
def ima_data(skale):
- return ImaData(linked=True, chain_id=skale.web3.eth.chainId)
+ return ImaData(linked=True, chain_id=skale.web3.eth.chain_id)
@pytest.fixture
@@ -622,3 +732,53 @@ def skale_manager_opts():
schains_internal_address='0x1656',
nodes_address='0x7742'
)
+
+
+@pytest.fixture
+def new_upstream(schain_db):
+    # Yields the path of a freshly-touched upstream config file inside the
+    # chain's config dir; the dir is removed on teardown.
+    name = schain_db
+    config_dir = schain_config_dir(name)
+    # Bug fix: single-argument os.path.join was a no-op, so the file was
+    # created in CWD and never cleaned up by the rmtree below.
+    upath = os.path.join(config_dir, f'schain_{name}_2_2_1_16_1687248983')
+    try:
+        Path(upath).touch()
+        yield upath
+    finally:
+        shutil.rmtree(config_dir, ignore_errors=True)
+
+
+@pytest.fixture
+def estate(skale):
+ return ExternalState(
+ ima_linked=True,
+ chain_id=skale.web3.eth.chain_id,
+ ranges=ALLOWED_RANGES
+ )
+
+
+@pytest.fixture
+def econfig(schain_db, estate):
+ name = schain_db
+ ec = ExternalConfig(name)
+ ec.update(estate)
+ return ec
+
+
+@pytest.fixture
+def upstreams(schain_db, schain_config):
+ name = schain_db
+ config_folder = schain_config_dir(name)
+ files = [
+ f'schain_{name}_10_1687183338.json',
+ f'schain_{name}_9_1687183335.json',
+ f'schain_{name}_11_1687183336.json',
+ f'schain_{name}_11_1687183337.json',
+ f'schain_{name}_11_1687183339.json'
+ ]
+ try:
+ for fname in files:
+ fpath = os.path.join(config_folder, fname)
+ with open(fpath, 'w') as f:
+ json.dump(schain_config, f)
+ yield files
+ finally:
+ shutil.rmtree(config_folder, ignore_errors=True)
diff --git a/tests/db_test.py b/tests/db_test.py
index 0cf76ff03..40ede0ca5 100644
--- a/tests/db_test.py
+++ b/tests/db_test.py
@@ -1,4 +1,4 @@
-from concurrent.futures import ProcessPoolExecutor as pexec
+from concurrent.futures import as_completed, ProcessPoolExecutor as pexec
import pytest
@@ -32,7 +32,7 @@ def test_upsert_schain_record(db):
executor.submit(upsert_schain_record, f'schain-{i}')
for i in range(RECORDS_NUMBER)
]
- for f in futures:
+ for f in as_completed(futures):
f.result()
assert SChainRecord.select().count() == RECORDS_NUMBER
diff --git a/tests/dkg_test/filter_test.py b/tests/dkg_test/filter_test.py
index 28e794d6d..2580727bd 100644
--- a/tests/dkg_test/filter_test.py
+++ b/tests/dkg_test/filter_test.py
@@ -10,7 +10,7 @@
@pytest.fixture
def filter_mock(skale):
filter = Filter(skale, SCHAIN_NAME, N)
- filter.first_unseen_block = skale.web3.eth.getBlock("latest")['number'] - 100
+ filter.first_unseen_block = skale.web3.eth.get_block("latest")['number'] - 100
return filter
@@ -24,9 +24,9 @@ def assert_not_called_with(self, *args, **kwargs):
mock.Mock.assert_not_called_with = assert_not_called_with
first = filter_mock.first_unseen_block
- latest = skale.web3.eth.getBlock("latest")['number']
- with mock.patch.object(skale.web3.eth, 'getBlock',
- wraps=skale.web3.eth.getBlock) as block_mock:
+ latest = skale.web3.eth.get_block("latest")['number']
+ with mock.patch.object(skale.web3.eth, 'get_block',
+ wraps=skale.web3.eth.get_block) as block_mock:
result = filter_mock.get_events()
block_mock.assert_not_called_with(first - 1)
block_mock.assert_any_call(first, full_transactions=True)
@@ -36,10 +36,10 @@ def assert_not_called_with(self, *args, **kwargs):
def test_get_events_from_start(skale, filter_mock):
- latest = skale.web3.eth.getBlock("latest")['number']
- mock_start_block = skale.web3.eth.getBlock("latest")['number'] - 100
- with mock.patch.object(skale.web3.eth, 'getBlock',
- wraps=skale.web3.eth.getBlock) as block_mock, \
+ latest = skale.web3.eth.get_block("latest")['number']
+ mock_start_block = skale.web3.eth.get_block("latest")['number'] - 100
+ with mock.patch.object(skale.web3.eth, 'get_block',
+ wraps=skale.web3.eth.get_block) as block_mock, \
mock.patch.object(skale.dkg.contract.functions.getChannelStartedBlock, 'call',
new=mock.Mock(return_value=mock_start_block)):
result = filter_mock.get_events(from_channel_started_block=True)
diff --git a/tests/logger_test.py b/tests/logger_test.py
index 603e281b2..daa26c9fd 100644
--- a/tests/logger_test.py
+++ b/tests/logger_test.py
@@ -23,4 +23,4 @@ def test_custom_formatter():
ADMIN_LOG_FORMAT,
compose_hiding_patterns()
).format(record)
- assert 'MainThread - None:0 - [SGX_KEY], http://54.545.454.12:1231, [ETH_IP] http://[ETH_IP]:8080, [ETH_IP][ETH_IP]loc https://testnet.com, wss://127.0.0.1.com, ttt://127.0.0.1.com, foo://127.0.0.1.com, NEK//127.0.0.1.com, ' in formatted_text # noqa
+ assert '[MainProcess][MainThread] - None:0 - [SGX_KEY], http://54.545.454.12:1231, [ETH_IP] http://[ETH_IP]:8080, [ETH_IP][ETH_IP]loc https://testnet.com, wss://127.0.0.1.com, ttt://127.0.0.1.com, foo://127.0.0.1.com, NEK//127.0.0.1.com, ' in formatted_text # noqa
diff --git a/tests/routes/health_test.py b/tests/routes/health_test.py
index 0d42ea7d2..c0b9d61b4 100644
--- a/tests/routes/health_test.py
+++ b/tests/routes/health_test.py
@@ -6,7 +6,7 @@
from sgx import SgxClient
from core.node_config import NodeConfig
-from core.schains.checks import SChainChecks, CheckRes
+from core.schains.checks import SChainChecks
from tools.configs import SGX_SERVER_URL, SGX_CERTIFICATES_FOLDER
@@ -92,11 +92,7 @@ def test_schains_checks(skale_bp, skale, schain_db, dutils):
class SChainChecksMock(SChainChecks):
def __init__(self, *args, **kwargs):
- super(SChainChecksMock, self).__init__(*args, dutils=dutils, **kwargs)
-
- @property
- def firewall_rules(self) -> CheckRes:
- return CheckRes(True)
+ super().__init__(*args, dutils=dutils, **kwargs)
def get_schains_for_node_mock(self, node_id):
return [
@@ -105,8 +101,7 @@ def get_schains_for_node_mock(self, node_id):
{'name': ''}
]
- with mock.patch('web.routes.health.SChainChecks', SChainChecksMock), \
- mock.patch('web.routes.health.SChainChecks', SChainChecksMock):
+ with mock.patch('web.routes.health.SChainChecks', SChainChecksMock):
with mock.patch(
'skale.contracts.manager.schains.SChains.get_schains_for_node',
get_schains_for_node_mock
@@ -121,7 +116,7 @@ def get_schains_for_node_mock(self, node_id):
'dkg': False,
'config': False,
'volume': False,
- 'firewall_rules': True,
+ 'firewall_rules': False,
'skaled_container': False,
'exit_code_ok': True,
'rpc': False,
diff --git a/tests/routes/node_test.py b/tests/routes/node_test.py
index 0d83d8a74..b641504df 100644
--- a/tests/routes/node_test.py
+++ b/tests/routes/node_test.py
@@ -142,7 +142,7 @@ def test_create_with_errors(skale_bp):
def get_expected_signature(skale, validator_id):
- unsigned_hash = Web3.solidityKeccak(['uint256'], [validator_id])
+ unsigned_hash = Web3.solidity_keccak(['uint256'], [validator_id])
signed_hash = skale.wallet.sign_hash(unsigned_hash.hex())
return signed_hash.signature.hex()
diff --git a/tests/routes/schains_test.py b/tests/routes/schains_test.py
index c876d8472..02c41ea2f 100644
--- a/tests/routes/schains_test.py
+++ b/tests/routes/schains_test.py
@@ -9,7 +9,7 @@
from Crypto.Hash import keccak
from core.node_config import NodeConfig
-from core.schains.config.directory import schain_config_filepath
+from core.schains.config.file_manager import ConfigFileManager
from tests.utils import get_bp_data, get_test_rule_controller, post_bp_data
from web.models.schain import SChainRecord, upsert_schain_record
from web.routes.schains import schains_bp
@@ -44,18 +44,21 @@ def test_schain_statuses(skale_bp, skaled_status, _schain_name):
def test_schain_config(skale_bp, skale, schain_config, schain_on_contracts):
name = schain_on_contracts
- filename = schain_config_filepath(name)
- dirname = os.path.dirname(filename)
+ filepath = ConfigFileManager(name).skaled_config_path
+ dirname = os.path.dirname(filepath)
if not os.path.isdir(dirname):
- os.makedirs(os.path.dirname(filename))
- with open(filename, 'w') as f:
- text = {'skaleConfig': {'nodeInfo': {'nodeID': 1}}}
- f.write(json.dumps(text))
- data = get_bp_data(skale_bp, get_api_url(BLUEPRINT_NAME, 'config'), {'schain_name': name})
- assert data == {'payload': {'nodeInfo': {'nodeID': 1}},
- 'status': 'ok'}
- os.remove(filename)
- shutil.rmtree(os.path.dirname(filename))
+ os.makedirs(os.path.dirname(filepath))
+ try:
+ with open(filepath, 'w') as f:
+ text = {'skaleConfig': {'nodeInfo': {'nodeID': 1}}}
+ f.write(json.dumps(text))
+ data = get_bp_data(skale_bp, get_api_url(
+ BLUEPRINT_NAME, 'config'), {'schain_name': name})
+ assert data == {'payload': {'nodeInfo': {'nodeID': 1}},
+ 'status': 'ok'}
+ finally:
+ os.remove(filepath)
+ shutil.rmtree(os.path.dirname(filepath), ignore_errors=True)
def test_schains_list(skale_bp, skale):
@@ -67,7 +70,6 @@ def schain_config_exists_mock(schain):
return True
-@mock.patch('web.routes.schains.schain_config_exists', schain_config_exists_mock)
@mock.patch(
'web.routes.schains.get_default_rule_controller',
partial(get_test_rule_controller, synced=True)
@@ -139,16 +141,6 @@ def test_get_schain(
keccak_hash = keccak.new(data=schain_name.encode("utf8"), digest_bits=256)
schain_id = '0x' + keccak_hash.hexdigest()
- data = get_bp_data(
- skale_bp,
- get_api_url(BLUEPRINT_NAME, 'get'),
- params={'schain_name': schain_name}
- )
- assert data == {
- 'payload': f'No schain with name {schain_name}',
- 'status': 'error'
- }
-
r = upsert_schain_record(schain_name)
r.set_config_version(meta_file['config_stream'])
data = get_bp_data(
@@ -167,6 +159,17 @@ def test_get_schain(
}
}
+ not_existing_schain = 'not-existing-schain'
+ data = get_bp_data(
+ skale_bp,
+ get_api_url(BLUEPRINT_NAME, 'get'),
+ params={'schain_name': not_existing_schain}
+ )
+ assert data == {
+ 'payload': f'No schain with name {not_existing_schain}',
+ 'status': 'error'
+ }
+
def test_schain_containers_versions(skale_bp):
skaled_version = '3.7.3-develop.4'
@@ -176,7 +179,8 @@ def test_schain_containers_versions(skale_bp):
return_value=skaled_version
), mock.patch('web.routes.schains.get_ima_version',
return_value=ima_version):
- data = get_bp_data(skale_bp, get_api_url(BLUEPRINT_NAME, 'container-versions'))
+ data = get_bp_data(skale_bp, get_api_url(
+ BLUEPRINT_NAME, 'container-versions'))
assert data == {
'status': 'ok',
'payload': {
diff --git a/tests/routes/wallet_test.py b/tests/routes/wallet_test.py
index 8b338266a..22ca3a2d8 100644
--- a/tests/routes/wallet_test.py
+++ b/tests/routes/wallet_test.py
@@ -25,14 +25,14 @@ def handler(sender, **kwargs):
def test_load_wallet(skale_bp, skale):
data = get_bp_data(skale_bp, get_api_url(BLUEPRINT_NAME, 'info'))
address = skale.wallet.address
- eth_balance_wei = skale.web3.eth.getBalance(address)
+ eth_balance_wei = skale.web3.eth.get_balance(address)
expected_data = {
'status': 'ok',
'payload': {
'address': to_checksum_address(address),
'eth_balance_wei': eth_balance_wei,
'skale_balance_wei': 0, # TODO: Remove from node cli
- 'eth_balance': str(skale.web3.fromWei(eth_balance_wei, 'ether')),
+ 'eth_balance': str(skale.web3.from_wei(eth_balance_wei, 'ether')),
'skale_balance': '0' # TODO: Remove from node cli
}
}
@@ -42,32 +42,32 @@ def test_load_wallet(skale_bp, skale):
def test_send_eth(skale_bp, skale):
address = skale.wallet.address
amount = '0.01'
- amount_wei = skale.web3.toWei(amount, 'ether')
+ amount_wei = skale.web3.to_wei(amount, 'ether')
receiver_0 = '0xf38b5dddd74b8901c9b5fb3ebd60bf5e7c1e9763'
checksum_receiver_0 = to_checksum_address(receiver_0)
- receiver_balance_0 = skale.web3.eth.getBalance(checksum_receiver_0)
- balance_0 = skale.web3.eth.getBalance(address)
+ receiver_balance_0 = skale.web3.eth.get_balance(checksum_receiver_0)
+ balance_0 = skale.web3.eth.get_balance(address)
json_data = {
'address': receiver_0,
'amount': amount
}
data = post_bp_data(skale_bp, get_api_url(BLUEPRINT_NAME, 'send-eth'), json_data)
- balance_1 = skale.web3.eth.getBalance(address)
+ balance_1 = skale.web3.eth.get_balance(address)
assert data == {'status': 'ok', 'payload': {}}
assert balance_1 < balance_0
- assert skale.web3.eth.getBalance(checksum_receiver_0) - \
+ assert skale.web3.eth.get_balance(checksum_receiver_0) - \
receiver_balance_0 == amount_wei
receiver_1 = '0x01C19c5d3Ad1C3014145fC82263Fbae09e23924A'
- receiver_balance_1 = skale.web3.eth.getBalance(receiver_1)
+ receiver_balance_1 = skale.web3.eth.get_balance(receiver_1)
json_data = {
'address': receiver_1,
'amount': amount
}
data = post_bp_data(skale_bp, get_api_url(BLUEPRINT_NAME, 'send-eth'), json_data)
assert data == {'status': 'ok', 'payload': {}}
- assert skale.web3.eth.getBalance(address) < balance_1
- assert skale.web3.eth.getBalance(receiver_1) - \
+ assert skale.web3.eth.get_balance(address) < balance_1
+ assert skale.web3.eth.get_balance(receiver_1) - \
receiver_balance_1 == amount_wei
diff --git a/tests/schains/checks_test.py b/tests/schains/checks_test.py
index 022f0c903..85a5c14a7 100644
--- a/tests/schains/checks_test.py
+++ b/tests/schains/checks_test.py
@@ -1,3 +1,4 @@
+import json
import os
import time
from http import HTTPStatus
@@ -9,18 +10,22 @@
import docker
import pytest
-from core.schains.skaled_exit_codes import SkaledExitCodes
from core.schains.checks import SChainChecks, CheckRes
+from core.schains.config.file_manager import UpstreamConfigFilename
+from core.schains.config.directory import (
+ get_schain_check_filepath,
+ schain_config_dir
+)
+from core.schains.skaled_exit_codes import SkaledExitCodes
from core.schains.runner import get_container_info, get_image_name, run_ima_container
-from core.schains.cleaner import remove_ima_container
-from core.schains.config.directory import get_schain_check_filepath
+# from core.schains.cleaner import remove_ima_container
from tools.configs.containers import IMA_CONTAINER, SCHAIN_CONTAINER
from tools.helper import read_json
from web.models.schain import upsert_schain_record, SChainRecord
-from tests.utils import get_schain_contracts_data, response_mock, request_mock
+from tests.utils import CONFIG_STREAM, get_schain_contracts_data, response_mock, request_mock
NOT_EXISTS_SCHAIN_NAME = 'qwerty123'
@@ -34,6 +39,7 @@
TEST_TIMESTAMP_HEX = '0x55ba467c'
TEST_TIMESTAMP = int(TEST_TIMESTAMP_HEX, 16)
+
ETH_GET_BLOCK_RESULT = {
"jsonrpc": "2.0",
"id": 1,
@@ -71,7 +77,7 @@ def firewall_rules(self) -> CheckRes:
@pytest.fixture
-def sample_false_checks(schain_config, schain_db, rule_controller, dutils):
+def sample_false_checks(schain_config, schain_db, rule_controller, estate, dutils):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
schain_record = SChainRecord.get_by_name(schain_name)
return SChainChecks(
@@ -79,6 +85,8 @@ def sample_false_checks(schain_config, schain_db, rule_controller, dutils):
TEST_NODE_ID,
schain_record=schain_record,
rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
dutils=dutils
)
@@ -88,6 +96,7 @@ def rules_unsynced_checks(
schain_config,
uninited_rule_controller,
schain_db,
+ estate,
dutils
):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
@@ -97,6 +106,8 @@ def rules_unsynced_checks(
TEST_NODE_ID,
schain_record=schain_record,
rule_controller=uninited_rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
dutils=dutils
)
@@ -111,15 +122,28 @@ def test_dkg_check(schain_checks, sample_false_checks):
assert not sample_false_checks.dkg.status
-def test_config_check(schain_checks, sample_false_checks):
- with mock.patch('core.schains.checks.schain_config_version_match', return_value=True):
- assert schain_checks.config.status
- assert not sample_false_checks.config.status
+def test_upstream_config_check(schain_checks):
+ assert not schain_checks.upstream_config
+ ts = int(time.time())
+ name, rotation_id = schain_checks.name, schain_checks.rotation_id
+ upstream_path = os.path.join(
+ schain_config_dir(name),
+ f'schain_{name}_{rotation_id}_{ts}.json'
+ )
-def test_config_check_wrong_version(schain_checks):
- schain_checks.schain_record = SchainRecordMock('9.8.7')
- assert not schain_checks.config.status
+ with open(upstream_path, 'w') as upstream_file:
+ json.dump({'config': 'upstream'}, upstream_file)
+
+ assert schain_checks.upstream_config
+
+ schain_checks._subjects[0].stream_version = 'new-version'
+ assert not schain_checks.upstream_config
+
+
+def test_config_check(schain_checks, sample_false_checks):
+ assert schain_checks.config
+ assert not sample_false_checks.config
def test_volume_check(schain_checks, sample_false_checks, dutils):
@@ -133,10 +157,8 @@ def test_volume_check(schain_checks, sample_false_checks, dutils):
def test_firewall_rules_check(schain_checks, rules_unsynced_checks):
schain_checks.rc.sync()
- with mock.patch('core.schains.checks.schain_config_version_match', return_value=True):
- assert schain_checks.firewall_rules.status
- with mock.patch('core.schains.checks.schain_config_version_match', return_value=True):
- assert not rules_unsynced_checks.firewall_rules.status
+ assert schain_checks.firewall_rules
+ assert not rules_unsynced_checks.firewall_rules.status
def test_container_check(schain_checks, sample_false_checks):
@@ -163,29 +185,31 @@ def test_ima_container_check(schain_checks, cleanup_ima_containers, dutils):
name = schain_checks.name
schain = get_schain_contracts_data(name)
image = get_image_name(type=IMA_CONTAINER)
- new_image = get_image_name(type=IMA_CONTAINER, new=True)
+ # new_image = get_image_name(type=IMA_CONTAINER, new=True)
# if dutils.pulled(new_image):
# dutils.rmi(new_image)
- assert not schain_checks.ima_container.status
+ # assert not schain_checks.ima_container.status
- with mock.patch('core.schains.checks.get_ima_migration_ts', return_value=mts):
- run_ima_container(schain, mainnet_chain_id=1, image=image, dutils=dutils)
+ # with mock.patch('core.schains.checks.get_ima_migration_ts', return_value=mts):
+ # run_ima_container(schain, mainnet_chain_id=1,
+ # image=image, dutils=dutils)
- assert not schain_checks.ima_container.status
+ # assert not schain_checks.ima_container.status
- dutils.pull(new_image)
+ # dutils.pull(new_image)
- assert schain_checks.ima_container.status
+ # assert schain_checks.ima_container.status
- remove_ima_container(name, dutils)
+ # remove_ima_container(name, dutils)
mts = ts - 3600
with mock.patch('core.schains.checks.get_ima_migration_ts', return_value=mts):
assert not schain_checks.ima_container.status
image = get_image_name(type=IMA_CONTAINER, new=True)
- run_ima_container(schain, mainnet_chain_id=1, image=image, dutils=dutils)
+ run_ima_container(schain, mainnet_chain_id=1,
+ image=image, dutils=dutils)
assert schain_checks.ima_container.status
@@ -215,8 +239,9 @@ def test_rpc_check(schain_checks, schain_db):
with mock.patch('requests.post', rmock):
assert schain_checks.rpc.status
assert rmock.call_args == mock.call(
- 'http://0.0.0.0:10003',
- json={'jsonrpc': '2.0', 'method': 'eth_blockNumber', 'params': [], 'id': 1},
+ 'http://127.0.0.1:10003',
+ json={'jsonrpc': '2.0', 'method': 'eth_blockNumber',
+ 'params': [], 'id': 1},
cookies=None,
timeout=expected_timeout
)
@@ -224,15 +249,14 @@ def test_rpc_check(schain_checks, schain_db):
def test_blocks_check(schain_checks):
res_mock = response_mock(HTTPStatus.OK, ETH_GET_BLOCK_RESULT)
- with mock.patch('core.schains.checks.schain_config_version_match', return_value=True):
- with mock.patch('requests.post', return_value=res_mock), \
- mock.patch('time.time', return_value=TEST_TIMESTAMP):
- assert schain_checks.blocks.status
- with mock.patch('requests.post', return_value=res_mock):
- assert not schain_checks.blocks.status
+ with mock.patch('requests.post', return_value=res_mock), \
+ mock.patch('time.time', return_value=TEST_TIMESTAMP):
+ assert schain_checks.blocks
+ with mock.patch('requests.post', return_value=res_mock):
+ assert not schain_checks.blocks
-def test_init_checks(skale, schain_db, uninited_rule_controller, dutils):
+def test_init_checks(skale, schain_db, uninited_rule_controller, estate, dutils):
schain_name = schain_db
schain_record = SChainRecord.get_by_name(schain_name)
checks = SChainChecks(
@@ -240,13 +264,15 @@ def test_init_checks(skale, schain_db, uninited_rule_controller, dutils):
TEST_NODE_ID,
schain_record=schain_record,
rule_controller=uninited_rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
dutils=dutils
)
assert checks.name == schain_name
assert checks.node_id == TEST_NODE_ID
-def test_exit_code(skale, rule_controller, schain_db, dutils):
+def test_exit_code(skale, rule_controller, schain_db, estate, dutils):
test_schain_name = schain_db
image_name, container_name, _, _ = get_container_info(
SCHAIN_CONTAINER, test_schain_name)
@@ -265,6 +291,8 @@ def test_exit_code(skale, rule_controller, schain_db, dutils):
TEST_NODE_ID,
schain_record=schain_record,
rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
dutils=dutils
)
assert not checks.exit_code_ok.status
@@ -274,13 +302,15 @@ def test_exit_code(skale, rule_controller, schain_db, dutils):
dutils.safe_rm(container_name)
-def test_process(skale, rule_controller, schain_db, dutils):
+def test_process(skale, rule_controller, schain_db, estate, dutils):
schain_record = SChainRecord.get_by_name(schain_db)
checks = SChainChecks(
schain_db,
TEST_NODE_ID,
schain_record=schain_record,
rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
dutils=dutils
)
assert not checks.process.status
@@ -293,7 +323,7 @@ def test_process(skale, rule_controller, schain_db, dutils):
assert not checks.process.status
-def test_get_all(schain_config, rule_controller, dutils, schain_db):
+def test_get_all(schain_config, rule_controller, dutils, schain_db, estate):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
schain_record = SChainRecord.get_by_name(schain_name)
node_id = schain_config['skaleConfig']['sChain']['nodes'][0]['nodeID']
@@ -302,12 +332,12 @@ def test_get_all(schain_config, rule_controller, dutils, schain_db):
node_id,
schain_record=schain_record,
rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
dutils=dutils
)
checks_dict = checks.get_all()
- assert isinstance(checks_dict['config_dir'], bool)
- assert isinstance(checks_dict['dkg'], bool)
assert isinstance(checks_dict['config'], bool)
assert isinstance(checks_dict['firewall_rules'], bool)
assert isinstance(checks_dict['skaled_container'], bool)
@@ -317,34 +347,40 @@ def test_get_all(schain_config, rule_controller, dutils, schain_db):
assert isinstance(checks_dict['ima_container'], bool)
assert isinstance(checks_dict['process'], bool)
+ estate.ima_linked = False
checks_without_ima = SChainChecksMock(
schain_db,
node_id,
schain_record=schain_record,
rule_controller=rule_controller,
- dutils=dutils,
- ima_linked=False
+ stream_version=CONFIG_STREAM,
+ estate=estate,
+ dutils=dutils
)
checks_dict_without_ima = checks_without_ima.get_all()
assert 'ima_container' not in checks_dict_without_ima
- filtered_checks = checks_without_ima.get_all(checks_filter=['config', 'volume'])
+ filtered_checks = checks_without_ima.get_all(
+ needed=['config', 'volume'])
assert len(filtered_checks) == 2
- filtered_checks = checks_without_ima.get_all(checks_filter=['ima_container'])
+ filtered_checks = checks_without_ima.get_all(
+ needed=['ima_container'])
assert len(filtered_checks) == 0
- filtered_checks = checks_without_ima.get_all(checks_filter=['<0_0>'])
+ filtered_checks = checks_without_ima.get_all(needed=['<0_0>'])
assert len(filtered_checks) == 0
-def test_get_all_with_save(node_config, rule_controller, dutils, schain_db):
+def test_get_all_with_save(node_config, rule_controller, dutils, schain_db, estate):
schain_record = upsert_schain_record(schain_db)
checks = SChainChecksMock(
schain_db,
node_config.id,
schain_record=schain_record,
rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
dutils=dutils
)
schain_check_path = get_schain_check_filepath(schain_db)
@@ -353,3 +389,53 @@ def test_get_all_with_save(node_config, rule_controller, dutils, schain_db):
assert os.path.isfile(schain_check_path)
checks_from_file = read_json(schain_check_path)
assert schain_checks == checks_from_file['checks']
+
+
+def test_config_updated(skale, rule_controller, schain_db, estate, dutils):
+ name = schain_db
+ folder = schain_config_dir(name)
+
+ schain_record = SChainRecord.get_by_name(name)
+
+ checks = SChainChecks(
+ name,
+ TEST_NODE_ID,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
+ dutils=dutils
+ )
+ assert checks.config_updated
+
+ upstream_path = UpstreamConfigFilename(
+ name, rotation_id=5, ts=int(time.time())).abspath(folder)
+
+ config_content = {'config': 'mock_v5'}
+ with open(upstream_path, 'w') as upstream_file:
+ json.dump(config_content, upstream_file)
+ assert not checks.config_updated
+
+ schain_record.set_sync_config_run(True)
+ checks = SChainChecks(
+ name,
+ TEST_NODE_ID,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
+ dutils=dutils
+ )
+ assert not checks.config_updated
+
+ schain_record.set_config_version('new-version')
+ checks = SChainChecks(
+ name,
+ TEST_NODE_ID,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ stream_version=CONFIG_STREAM,
+ estate=estate,
+ dutils=dutils
+ )
+ assert not checks.config_updated
diff --git a/tests/schains/cleaner_test.py b/tests/schains/cleaner_test.py
index 6a5249301..a342ebc51 100644
--- a/tests/schains/cleaner_test.py
+++ b/tests/schains/cleaner_test.py
@@ -43,7 +43,9 @@ class ImaEnv:
schain_dir: str
def to_dict(self):
- return {}
+ return {
+ 'SCHAIN_DIR': self.schain_dir,
+ }
def is_container_running(dutils, container_name):
@@ -121,13 +123,19 @@ def schain_container(schain_config, ssl_folder, dutils):
""" Creates and removes schain container """
schain_name = schain_config['skaleConfig']['sChain']['schainName']
schain_data = get_schain_contracts_data(schain_name)
- run_simple_schain_container(schain_data, dutils)
- yield schain_name
- schain_name = schain_config['skaleConfig']['sChain']['schainName']
- dutils.safe_rm(get_container_name(SCHAIN_CONTAINER, schain_name),
- force=True)
- dutils.safe_rm(get_container_name(IMA_CONTAINER, schain_name),
- force=True)
+ try:
+ run_simple_schain_container(schain_data, dutils)
+ yield schain_name
+ finally:
+ schain_name = schain_config['skaleConfig']['sChain']['schainName']
+ dutils.safe_rm(
+ get_container_name(SCHAIN_CONTAINER, schain_name),
+ force=True
+ )
+ dutils.safe_rm(
+ get_container_name(IMA_CONTAINER, schain_name),
+ force=True
+ )
def test_remove_schain_container(
@@ -145,6 +153,7 @@ def test_remove_schain_container(
assert not is_container_running(dutils, container_name)
+@pytest.mark.skip('Docker API GA issues need to be resolved')
def test_remove_ima_container(dutils, schain_container):
schain_name = schain_container
schain_data = get_schain_contracts_data(schain_name)
@@ -153,9 +162,9 @@ def test_remove_ima_container(dutils, schain_container):
)):
run_simple_ima_container(schain_data, dutils)
container_name = IMA_CONTAINER_NAME_TEMPLATE.format(schain_name)
- assert dutils.is_container_exists(container_name)
+ assert dutils.is_container_found(container_name)
remove_ima_container(schain_name, dutils=dutils)
- assert not dutils.is_container_exists(container_name)
+ assert not dutils.is_container_found(container_name)
def test_remove_schain_record():
diff --git a/tests/schains/cmd_test.py b/tests/schains/cmd_test.py
index 7deeacf5c..a87a25aaf 100644
--- a/tests/schains/cmd_test.py
+++ b/tests/schains/cmd_test.py
@@ -2,7 +2,7 @@
get_schain_container_cmd,
get_schain_container_sync_opts
)
-from core.schains.config.directory import schain_config_filepath
+from core.schains.config.main import get_skaled_container_config_path
from core.schains.ssl import get_ssl_filepath
from tools.configs.containers import SHARED_SPACE_CONTAINER_PATH
@@ -13,7 +13,7 @@
def test_get_schain_container_cmd(schain_config, cert_key_pair):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
container_opts = get_schain_container_cmd(schain_name)
- config_filepath = schain_config_filepath(schain_name, in_schain_container=True)
+ config_filepath = get_skaled_container_config_path(schain_name)
ssl_key_path, ssl_cert_path = get_ssl_filepath()
expected_opts = (
f'--config {config_filepath} -d /data_dir --ipcpath /data_dir --http-port 10003 '
@@ -35,7 +35,8 @@ def test_get_schain_container_cmd(schain_config, cert_key_pair):
)
assert container_opts == expected_opts
- container_opts = get_schain_container_cmd(schain_name, snapshot_from='1.1.1.1')
+ container_opts = get_schain_container_cmd(
+ schain_name, snapshot_from='1.1.1.1')
expected_opts = (
f'--config {config_filepath} -d /data_dir --ipcpath /data_dir --http-port 10003 '
f'--https-port 10008 --ws-port 10002 --wss-port 10007 --sgx-url {SGX_SERVER_URL} '
diff --git a/tests/schains/config/config_test.py b/tests/schains/config/config_test.py
index 681a751ef..c7856e028 100644
--- a/tests/schains/config/config_test.py
+++ b/tests/schains/config/config_test.py
@@ -1,3 +1,5 @@
+import os
+
import pytest
from core.schains.config.helper import (
@@ -6,6 +8,9 @@
get_own_ip_from_config,
get_schain_env
)
+from core.schains.config.directory import schain_config_dir
+from core.schains.config.file_manager import ConfigFileManager
+from core.schains.config.main import get_latest_finish_ts, get_rotation_ids_from_config
from core.schains.volume import get_schain_volume_config
from tools.configs.containers import SHARED_SPACE_CONTAINER_PATH, SHARED_SPACE_VOLUME_NAME
@@ -35,11 +40,48 @@ def test_get_schain_volume_config():
volume_config = get_schain_volume_config('test_name', '/mnt/mount_path/')
assert volume_config == {
'test_name': {'bind': '/mnt/mount_path/', 'mode': 'rw'},
- SHARED_SPACE_VOLUME_NAME: {'bind': SHARED_SPACE_CONTAINER_PATH, 'mode': 'rw'}
+ SHARED_SPACE_VOLUME_NAME: {
+ 'bind': SHARED_SPACE_CONTAINER_PATH, 'mode': 'rw'}
}
volume_config = get_schain_volume_config('test_name',
'/mnt/mount_path/', mode='Z')
assert volume_config == {
'test_name': {'bind': '/mnt/mount_path/', 'mode': 'Z'},
- SHARED_SPACE_VOLUME_NAME: {'bind': SHARED_SPACE_CONTAINER_PATH, 'mode': 'Z'}
+ SHARED_SPACE_VOLUME_NAME: {
+ 'bind': SHARED_SPACE_CONTAINER_PATH, 'mode': 'Z'}
}
+
+
+def test_get_schain_upstream_config(schain_db, upstreams):
+ name = schain_db
+ cfm = ConfigFileManager(schain_name=name)
+ upstream_config = cfm.latest_upstream_path
+ config_folder = schain_config_dir(name)
+ expected = os.path.join(
+ config_folder, f'schain_{name}_11_1687183339.json')
+ assert upstream_config == expected
+
+ not_existing_chain = 'not-exist'
+ cfm = ConfigFileManager(not_existing_chain)
+ assert not cfm.upstream_config_exists()
+ assert cfm.latest_upstream_config is None
+
+
+def test_get_latest_finish_ts(schain_config):
+ schain_config['skaleConfig']['sChain']['nodeGroups'].update(
+ {
+ '2': {'finish_ts': None},
+ '3': {'finish_ts': None}
+ }
+ )
+
+ finish_ts = get_latest_finish_ts(schain_config)
+ assert finish_ts == 1687180291
+ schain_config['skaleConfig']['sChain']['nodeGroups'].pop('0')
+ finish_ts = get_latest_finish_ts(schain_config)
+ assert finish_ts is None
+
+
+def test_get_rotation_ids_from_config(schain_config):
+ ids = get_rotation_ids_from_config(schain_config)
+ assert ids == [0, 1]
diff --git a/tests/schains/config/file_manager_test.py b/tests/schains/config/file_manager_test.py
new file mode 100644
index 000000000..d36617fee
--- /dev/null
+++ b/tests/schains/config/file_manager_test.py
@@ -0,0 +1,20 @@
+import os
+
+from core.schains.config.directory import schain_config_dir
+from core.schains.config.file_manager import ConfigFileManager
+
+from tools.configs.schains import SCHAINS_DIR_PATH
+
+
+def test_config_file_manager(schain_db, schain_config, upstreams):
+ name = schain_db
+ cfm = ConfigFileManager(schain_name=name)
+ assert cfm.skaled_config_path == os.path.join(
+ SCHAINS_DIR_PATH,
+ name,
+ f'schain_{name}.json'
+ )
+ assert cfm.latest_upstream_path == os.path.join(
+ schain_config_dir(name),
+ f'schain_{name}_11_1687183339.json'
+ )
diff --git a/tests/schains/monitor/action/config_action_test.py b/tests/schains/monitor/action/config_action_test.py
new file mode 100644
index 000000000..4bfaa780d
--- /dev/null
+++ b/tests/schains/monitor/action/config_action_test.py
@@ -0,0 +1,109 @@
+import shutil
+
+import pytest
+
+from core.schains.checks import ConfigChecks
+from core.schains.config.directory import schain_config_dir
+from core.schains.monitor.action import ConfigActionManager
+from core.schains.external_config import ExternalConfig
+from tools.helper import read_json
+from web.models.schain import SChainRecord
+
+from tests.utils import CONFIG_STREAM
+
+
+@pytest.fixture
+def rotation_data(schain_db, skale):
+ return skale.node_rotation.get_rotation(schain_db)
+
+
+@pytest.fixture
+def config_checks(
+ schain_db,
+ skale,
+ node_config,
+ schain_on_contracts,
+ estate,
+ rotation_data
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ return ConfigChecks(
+ schain_name=name,
+ node_id=node_config.id,
+ schain_record=schain_record,
+ rotation_id=rotation_data['rotation_id'],
+ stream_version=CONFIG_STREAM,
+ estate=estate
+ )
+
+
+@pytest.fixture
+def config_am(
+ schain_db,
+ skale,
+ node_config,
+ schain_on_contracts,
+ predeployed_ima,
+ secret_key,
+ estate,
+ config_checks
+):
+ name = schain_db
+ rotation_data = skale.node_rotation.get_rotation(name)
+ schain = skale.schains.get_by_name(name)
+ return ConfigActionManager(
+ skale=skale,
+ schain=schain,
+ node_config=node_config,
+ rotation_data=rotation_data,
+ checks=config_checks,
+ stream_version=CONFIG_STREAM,
+ estate=estate
+ )
+
+
+def test_upstream_config_actions(config_am, config_checks):
+ config_am.config_dir()
+ assert config_checks.config_dir
+ assert not config_checks.upstream_config
+
+ # Folder created for secret key. Temporary moving
+ schain_folder = schain_config_dir(config_am.name)
+ tmp_schain_folder = '.' + schain_folder
+ try:
+ shutil.move(schain_folder, tmp_schain_folder)
+ assert not config_checks.config_dir
+ assert not config_checks.upstream_config
+ finally:
+ shutil.move(tmp_schain_folder, schain_folder)
+
+ # DKG action is tested separately in dkg_test module
+
+ config_am.config_dir()
+ config_am.upstream_config()
+ assert config_checks.config_dir
+ assert config_checks.upstream_config
+
+ # Try to recreate config with no changes
+ config_am.upstream_config()
+ assert config_checks.upstream_config
+
+
+@pytest.fixture
+def empty_econfig(schain_db):
+ name = schain_db
+ return ExternalConfig(name)
+
+
+def test_external_state_config_actions(config_am, config_checks, empty_econfig):
+ config_am.config_dir()
+ assert not config_checks.external_state
+ assert config_am.external_state()
+ econfig_data = read_json(empty_econfig.path)
+ assert econfig_data == {
+ 'ima_linked': True,
+ 'chain_id': config_am.skale.web3.eth.chain_id,
+ 'ranges': [['1.1.1.1', '2.2.2.2'], ['3.3.3.3', '4.4.4.4']]
+ }
+ assert config_checks.external_state
diff --git a/tests/schains/monitor/action/skaled_action_test.py b/tests/schains/monitor/action/skaled_action_test.py
new file mode 100644
index 000000000..1e0e8966b
--- /dev/null
+++ b/tests/schains/monitor/action/skaled_action_test.py
@@ -0,0 +1,424 @@
+import datetime
+import json
+import os
+import time
+
+import freezegun
+import pytest
+import mock
+
+from core.schains.checks import SkaledChecks
+from core.schains.cleaner import remove_ima_container
+from core.schains.config.directory import schain_config_dir
+from core.schains.config.file_manager import UpstreamConfigFilename
+from core.schains.firewall.types import SChainRule
+from core.schains.monitor.action import SkaledActionManager
+from core.schains.runner import get_container_info
+from tools.configs.containers import SCHAIN_CONTAINER, IMA_CONTAINER
+from web.models.schain import SChainRecord
+
+CURRENT_TIMESTAMP = 1594903080
+CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP)
+
+
+def run_ima_container_mock(schain: dict, mainnet_chain_id: int, image: str, dutils=None):
+ image_name, container_name, _, _ = get_container_info(
+ IMA_CONTAINER, schain['name'])
+ image = image or image_name
+ dutils.safe_rm(container_name)
+ dutils.run_container(
+ image_name=image,
+ name=container_name,
+ entrypoint='bash -c "while true; do foo; sleep 2; done"'
+ )
+
+
+def monitor_schain_container_mock(
+ schain,
+ schain_record,
+ skaled_status,
+ download_snapshot=False,
+ start_ts=None,
+ abort_on_exit=True,
+ dutils=None
+):
+ image_name, container_name, _, _ = get_container_info(
+ SCHAIN_CONTAINER, schain['name'])
+ dutils.safe_rm(container_name)
+ if not skaled_status.exit_time_reached or not abort_on_exit:
+ dutils.run_container(
+ image_name=image_name,
+ name=container_name,
+ entrypoint='bash -c "while true; do foo; sleep 2; done"'
+ )
+
+
+@pytest.fixture
+def skaled_checks(
+ schain_db,
+ skale,
+ rule_controller,
+ dutils
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ return SkaledChecks(
+ schain_name=name,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ dutils=dutils
+ )
+
+
+@pytest.fixture
+def skaled_am(
+ schain_db,
+ skale,
+ node_config,
+ rule_controller,
+ schain_on_contracts,
+ predeployed_ima,
+ secret_key,
+ ssl_folder,
+ dutils,
+ skaled_checks
+):
+ name = schain_db
+ schain = skale.schains.get_by_name(name)
+ return SkaledActionManager(
+ schain=schain,
+ rule_controller=rule_controller,
+ checks=skaled_checks,
+ node_config=node_config,
+ dutils=dutils
+ )
+
+
+def test_volume_action(skaled_am, skaled_checks):
+ try:
+ assert not skaled_checks.volume
+ skaled_am.volume()
+ assert skaled_checks.volume
+ skaled_am.volume()
+ assert skaled_checks.volume
+ finally:
+ skaled_am.cleanup_schain_docker_entity()
+
+
+def test_skaled_container_action(skaled_am, skaled_checks):
+ try:
+ with mock.patch(
+ 'core.schains.monitor.action.monitor_schain_container',
+ monitor_schain_container_mock
+ ):
+ skaled_am.volume()
+ assert not skaled_checks.skaled_container
+ skaled_am.skaled_container()
+ assert skaled_checks.skaled_container
+ finally:
+ skaled_am.cleanup_schain_docker_entity()
+
+
+def test_skaled_container_with_snapshot_action(skaled_am):
+ try:
+ skaled_am.volume()
+ with mock.patch(
+ 'core.schains.monitor.action.monitor_schain_container',
+ new=mock.Mock()
+ ) as monitor_schain_mock:
+ skaled_am.skaled_container(download_snapshot=True)
+
+ monitor_schain_mock.assert_called_with(
+ skaled_am.schain,
+ schain_record=skaled_am.schain_record,
+ skaled_status=skaled_am.skaled_status,
+ download_snapshot=True,
+ start_ts=None,
+ abort_on_exit=True,
+ dutils=skaled_am.dutils
+ )
+ assert monitor_schain_mock.call_count == 1
+ finally:
+ skaled_am.cleanup_schain_docker_entity()
+
+
+def test_skaled_container_snapshot_delay_start_action(skaled_am):
+ ts = int(time.time())
+ try:
+ skaled_am.volume()
+ with mock.patch(
+ 'core.schains.monitor.action.monitor_schain_container',
+ new=mock.Mock()
+ ) as monitor_schain_mock:
+ skaled_am.skaled_container(download_snapshot=True, start_ts=ts)
+
+ monitor_schain_mock.assert_called_with(
+ skaled_am.schain,
+ schain_record=skaled_am.schain_record,
+ skaled_status=skaled_am.skaled_status,
+ download_snapshot=True,
+ start_ts=ts,
+ abort_on_exit=True,
+ dutils=skaled_am.dutils
+ )
+ assert monitor_schain_mock.call_count == 1
+ finally:
+ skaled_am.cleanup_schain_docker_entity()
+
+
+def test_restart_skaled_container_action(skaled_am, skaled_checks):
+ try:
+ skaled_am.volume()
+ with mock.patch(
+ 'core.schains.monitor.action.monitor_schain_container',
+ monitor_schain_container_mock
+ ):
+ assert not skaled_checks.skaled_container
+ skaled_am.restart_skaled_container()
+ assert skaled_checks.skaled_container
+ skaled_am.restart_skaled_container()
+ assert skaled_checks.skaled_container
+ skaled_am.reloaded_skaled_container()
+ assert skaled_checks.skaled_container
+ skaled_am.reloaded_skaled_container()
+ assert skaled_checks.skaled_container
+ finally:
+ skaled_am.cleanup_schain_docker_entity()
+
+
+def test_restart_skaled_container_action_exit_reached(
+ skaled_am,
+ skaled_checks,
+ skaled_status_exit_time_reached
+):
+ try:
+ skaled_am.volume()
+ with mock.patch(
+ 'core.schains.monitor.action.monitor_schain_container',
+ monitor_schain_container_mock
+ ):
+ assert not skaled_checks.skaled_container
+ skaled_am.reloaded_skaled_container()
+ assert not skaled_checks.skaled_container
+ skaled_am.reloaded_skaled_container(abort_on_exit=False)
+ assert skaled_checks.skaled_container
+ finally:
+ skaled_am.cleanup_schain_docker_entity()
+
+
+@pytest.fixture
+def cleanup_ima(dutils, skaled_am):
+ try:
+ yield
+ finally:
+ remove_ima_container(skaled_am.name, dutils=dutils)
+
+
+@pytest.fixture
+def ima_linked(econfig):
+ state = econfig.get()
+ state.ima_linked = True
+ econfig.update(state)
+
+
+def test_recreated_schain_containers(
+ skaled_am,
+ skaled_checks,
+ ima_linked,
+ cleanup_ima,
+ schain_db,
+ dutils
+):
+ name = schain_db
+
+ skaled_am.volume()
+ skaled_am.recreated_schain_containers()
+ schain_container = f'skale_schain_{name}'
+ ima_container = f'skale_ima_{name}'
+ dutils.wait_for_container_creation(schain_container)
+ dutils.wait_for_container_creation(ima_container)
+ skaled_created_ts = dutils.get_container_created_ts(schain_container)
+ ima_created_ts = dutils.get_container_created_ts(ima_container)
+
+ skaled_am.recreated_schain_containers()
+ dutils.wait_for_container_creation(schain_container)
+ dutils.wait_for_container_creation(ima_container)
+
+ skaled_ts = dutils.get_container_created_ts(schain_container)
+ ima_ts = dutils.get_container_created_ts(ima_container)
+ assert skaled_ts > skaled_created_ts
+ assert ima_ts > ima_created_ts
+
+
+def test_ima_container_action_new_chain(
+ skaled_am,
+ skaled_checks,
+ schain_config,
+ predeployed_ima,
+ ima_linked,
+ cleanup_ima,
+ dutils
+):
+ with mock.patch(
+ 'core.schains.monitor.containers.run_ima_container',
+ run_ima_container_mock
+ ):
+ skaled_am.ima_container()
+ containers = dutils.get_all_ima_containers(all=True)
+ assert len(containers) == 1
+ container_name = containers[0].name
+ assert container_name == f'skale_ima_{skaled_am.name}'
+ image = dutils.get_container_image_name(container_name)
+ assert image == 'skalenetwork/ima:2.0.0-beta.9'
+
+
+@pytest.mark.skip('Docker API GA issues need to be resolved')
+@mock.patch('core.schains.monitor.containers.run_ima_container', run_ima_container_mock)
+def test_ima_container_action_old_chain(
+ skaled_am,
+ skaled_checks,
+ schain_config,
+ predeployed_ima,
+ ima_linked,
+ cleanup_ima,
+ dutils
+):
+ ts = int(time.time())
+ mts = ts + 3600
+ with mock.patch('core.schains.monitor.action.get_ima_migration_ts', return_value=mts):
+ skaled_am.ima_container()
+ containers = dutils.get_all_ima_containers(all=True)
+ assert len(containers) == 1
+ assert containers[0].name == f'skale_ima_{skaled_am.name}'
+ container_name = containers[0].name
+ assert container_name == f'skale_ima_{skaled_am.name}'
+ image = dutils.get_container_image_name(container_name)
+ assert image == 'skalenetwork/ima:2.0.0-develop.3'
+ assert dutils.pulled('skalenetwork/ima:2.0.0-beta.9')
+
+ mts = ts - 5
+ with mock.patch('core.schains.monitor.action.get_ima_migration_ts', return_value=mts):
+ skaled_am.ima_container()
+ containers = dutils.get_all_ima_containers(all=True)
+ assert len(containers) == 1
+ container_name = containers[0].name
+ assert container_name == f'skale_ima_{skaled_am.name}'
+ image = dutils.get_container_image_name(container_name)
+ assert image == 'skalenetwork/ima:2.0.0-beta.9'
+
+
+def test_ima_container_action_not_linked(
+ skaled_am,
+ skaled_checks,
+ schain_db,
+ _schain_name,
+ cleanup_ima_containers,
+ dutils
+):
+ skaled_am.ima_container()
+ assert skaled_checks.ima_container
+
+
+def test_cleanup_empty_action(skaled_am, skaled_checks):
+ skaled_am.cleanup_schain_docker_entity()
+ assert not skaled_checks.skaled_container
+
+
+def test_schain_finish_ts(skale, schain_on_contracts):
+ name = schain_on_contracts
+ max_node_id = skale.nodes.get_nodes_number() - 1
+ assert skale.node_rotation.get_schain_finish_ts(max_node_id, name) is None
+
+
+def test_display_skaled_logs(skale, skaled_am, _schain_name):
+ skaled_am.log_executed_blocks()
+ # Don't display if no container
+ skaled_am.display_skaled_logs()
+ try:
+ skaled_am.volume()
+ with mock.patch(
+ 'core.schains.monitor.action.monitor_schain_container',
+ monitor_schain_container_mock
+ ):
+ skaled_am.skaled_container()
+ finally:
+ skaled_am.display_skaled_logs()
+ skaled_am.cleanup_schain_docker_entity()
+
+
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_upd_schain_record(skaled_am, skaled_checks):
+ # Prepare fake record
+ r = SChainRecord.get_by_name(skaled_am.name)
+ r.set_restart_count(1)
+ r.set_failed_rpc_count(1)
+
+ assert r.monitor_last_seen != CURRENT_DATETIME
+ skaled_am._upd_last_seen()
+ r = SChainRecord.get_by_name(skaled_am.name)
+ assert r.monitor_last_seen == CURRENT_DATETIME
+ skaled_am._upd_schain_record()
+ r = SChainRecord.get_by_name(skaled_am.name)
+
+ assert not r.first_run
+ assert not r.new_schain
+    assert r.restart_count == 0
+    assert r.failed_rpc_count == 0
+
+
+def test_update_config(skaled_am, skaled_checks):
+ folder = schain_config_dir(skaled_am.name)
+ config_path = os.path.join(folder, f'schain_{skaled_am.name}.json')
+ os.remove(config_path)
+ assert not skaled_checks.config
+
+ assert not skaled_checks.config_updated
+ upstream_path = UpstreamConfigFilename(
+ skaled_am.name, rotation_id=5, ts=int(time.time())).abspath(folder)
+
+ config_content = {'config': 'mock_v5'}
+ with open(upstream_path, 'w') as upstream_file:
+ json.dump(config_content, upstream_file)
+ skaled_am.update_config()
+ with open(config_path) as config_file:
+        assert json.load(config_file) == config_content
+ assert skaled_checks.config
+ assert skaled_checks.config_updated
+
+ time.sleep(1)
+ upstream_path = UpstreamConfigFilename(
+ skaled_am.name, rotation_id=6, ts=int(time.time())).abspath(folder)
+
+ config_content = {'config': 'mock_v6'}
+ with open(upstream_path, 'w') as upstream_file:
+ json.dump(config_content, upstream_file)
+
+ assert skaled_checks.config
+ assert not skaled_checks.config_updated
+ skaled_am.update_config()
+
+ assert skaled_checks.config_updated
+
+
+def test_firewall_rules_action(skaled_am, skaled_checks, rule_controller, econfig):
+ assert not skaled_checks.firewall_rules
+ skaled_am.firewall_rules()
+ assert skaled_checks.firewall_rules
+ added_rules = list(rule_controller.firewall_manager.rules)
+ print(added_rules)
+ assert added_rules == [
+ SChainRule(port=10000, first_ip='127.0.0.2', last_ip='127.0.0.2'),
+ SChainRule(port=10001, first_ip='1.1.1.1', last_ip='2.2.2.2'),
+ SChainRule(port=10001, first_ip='127.0.0.2', last_ip='127.0.0.2'),
+ SChainRule(port=10001, first_ip='3.3.3.3', last_ip='4.4.4.4'),
+ SChainRule(port=10002),
+ SChainRule(port=10003),
+ SChainRule(port=10004, first_ip='127.0.0.2', last_ip='127.0.0.2'),
+ SChainRule(port=10005, first_ip='1.1.1.1', last_ip='2.2.2.2'),
+ SChainRule(port=10005, first_ip='127.0.0.2', last_ip='127.0.0.2'),
+ SChainRule(port=10005, first_ip='3.3.3.3', last_ip='4.4.4.4'),
+ SChainRule(port=10007),
+ SChainRule(port=10008),
+ SChainRule(port=10009),
+ SChainRule(port=10010, first_ip='127.0.0.2', last_ip='127.0.0.2')
+ ]
diff --git a/tests/schains/monitor/base_monitor_test.py b/tests/schains/monitor/base_monitor_test.py
deleted file mode 100644
index ecd1c33bc..000000000
--- a/tests/schains/monitor/base_monitor_test.py
+++ /dev/null
@@ -1,418 +0,0 @@
-import time
-from datetime import datetime
-
-import mock
-import pytest
-
-from core.schains.checks import SChainChecks
-from core.schains.cleaner import remove_ima_container
-from core.schains.config.main import save_schain_config
-from core.schains.ima import ImaData
-from core.schains.monitor import BaseMonitor
-from core.schains.runner import get_container_info
-from tools.configs.containers import SCHAIN_CONTAINER, IMA_CONTAINER
-from web.models.schain import SChainRecord
-
-from tests.dkg_utils import safe_run_dkg_mock
-from tests.utils import get_test_rule_controller
-
-
-class BaseTestMonitor(BaseMonitor):
- @BaseMonitor.monitor_runner
- def run(self):
- return 1234
-
- def _run_all_checks(self):
- pass
-
-
-class CrashingTestMonitor(BaseMonitor):
- @BaseMonitor.monitor_runner
- def run(self):
- raise Exception('Something went wrong')
-
- def _run_all_checks(self):
- pass
-
-
-def init_schain_config_mock(
- skale,
- node_id,
- schain_name,
- generation,
- ecdsa_sgx_key_name,
- rotation_data,
- schain_record
-):
- save_schain_config({}, schain_name)
-
-
-def monitor_schain_container_mock(
- schain,
- schain_record,
- skaled_status,
- public_key=None,
- start_ts=None,
- dutils=None
-):
- image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain['name'])
- dutils.run_container(
- image_name=image_name,
- name=container_name,
- entrypoint='bash -c "while true; do foo; sleep 2; done"'
- )
-
-
-def monitor_ima_container(
- schain,
- public_key=None,
- start_ts=None,
- dutils=None
-):
- image_name, container_name, _, _ = get_container_info(
- IMA_CONTAINER, schain['name'])
- dutils.run_container(
- image_name=image_name,
- name=container_name,
- entrypoint='bash -c "while true; do foo; sleep 2; done"'
- )
-
-
-def run_ima_container_mock(schain: dict, mainnet_chain_id: int, image: str, dutils=None):
- default_image, container_name, _, _ = get_container_info(
- IMA_CONTAINER, schain['name'])
- image = image or default_image
- dutils.safe_rm(container_name)
- dutils.run_container(
- image_name=image,
- name=container_name,
- entrypoint='bash -c "while true; do foo; sleep 2; done"'
- )
-
-
-@pytest.fixture
-def test_monitor(
- schain_db,
- _schain_name,
- node_config,
- uninited_rule_controller,
- skale,
- ima_data,
- dutils
-):
- schain_record = SChainRecord.get_by_name(_schain_name)
- schain_checks = SChainChecks(
- _schain_name,
- node_config.id,
- schain_record=schain_record,
- rule_controller=uninited_rule_controller,
- dutils=dutils
- )
- return BaseTestMonitor(
- skale=skale,
- ima_data=ima_data,
- schain={'name': schain_db, 'partOfNode': 0, 'generation': 0},
- node_config=node_config,
- rotation_data={'rotation_id': 0, 'finish_ts': 0, 'leaving_node': 1},
- checks=schain_checks,
- rule_controller=uninited_rule_controller,
- dutils=dutils
- )
-
-
-def test_crashing_monitor(
- schain_db,
- _schain_name,
- skale,
- node_config,
- rule_controller,
- ima_data,
- schain_struct,
- dutils
-):
- schain_record = SChainRecord.get_by_name(_schain_name)
- schain_checks = SChainChecks(
- _schain_name,
- node_config.id,
- schain_record=schain_record,
- rule_controller=rule_controller,
- dutils=dutils
- )
- test_monitor = CrashingTestMonitor(
- skale=skale,
- ima_data=ima_data,
- schain=schain_struct,
- node_config=node_config,
- rotation_data={'rotation_id': 1, 'leaving_node': 1},
- checks=schain_checks,
- rule_controller=rule_controller,
- dutils=dutils
- )
- with pytest.raises(Exception):
- test_monitor.run()
-
-
-def test_base_monitor(test_monitor):
- assert test_monitor.run() == 1234
-
-
-def test_base_monitor_config_dir(test_monitor):
- assert not test_monitor.config_dir()
- assert test_monitor.config_dir()
-
-
-def test_base_monitor_dkg(test_monitor):
- test_monitor.config_dir()
- with mock.patch('core.schains.monitor.base_monitor.safe_run_dkg', safe_run_dkg_mock):
- assert not test_monitor.dkg()
- assert test_monitor.dkg()
-
-
-def test_base_monitor_config(test_monitor):
- test_monitor.config_dir()
- with mock.patch(
- 'core.schains.monitor.base_monitor.init_schain_config', init_schain_config_mock):
- assert not test_monitor.config()
- assert test_monitor.config()
-
-
-def test_base_monitor_volume(test_monitor):
- test_monitor.config_dir()
- assert not test_monitor.volume()
- assert test_monitor.volume()
- test_monitor.cleanup_schain_docker_entity()
-
-
-def test_base_monitor_skaled_container(test_monitor):
- test_monitor.volume()
- with mock.patch(
- 'core.schains.monitor.base_monitor.monitor_schain_container',
- monitor_schain_container_mock
- ):
- assert not test_monitor.skaled_container()
- assert test_monitor.skaled_container()
- test_monitor.cleanup_schain_docker_entity()
-
-
-def test_base_monitor_skaled_container_sync(test_monitor):
- test_monitor.volume()
- with mock.patch(
- 'core.schains.monitor.base_monitor.monitor_schain_container',
- new=mock.Mock()
- ) as monitor_schain_mock:
- test_monitor.skaled_container(download_snapshot=True)
-
- monitor_schain_mock.assert_called_with(
- test_monitor.schain,
- schain_record=test_monitor.schain_record,
- skaled_status=test_monitor.skaled_status,
- public_key='0:0:1:0',
- start_ts=None,
- dutils=test_monitor.dutils
- )
- assert monitor_schain_mock.call_count == 1
-
-
-def test_base_monitor_skaled_container_sync_delay_start(test_monitor):
- test_monitor.volume()
- with mock.patch(
- 'core.schains.monitor.base_monitor.monitor_schain_container',
- new=mock.Mock()
- ) as monitor_schain_mock:
- test_monitor.finish_ts = 1245
- test_monitor.skaled_container(download_snapshot=True, delay_start=True)
-
- monitor_schain_mock.assert_called_with(
- test_monitor.schain,
- schain_record=test_monitor.schain_record,
- skaled_status=test_monitor.skaled_status,
- public_key='0:0:1:0',
- start_ts=1245,
- dutils=test_monitor.dutils
- )
- assert monitor_schain_mock.call_count == 1
-
-
-def test_base_monitor_restart_skaled_container(test_monitor):
- test_monitor.volume()
- with mock.patch(
- 'core.schains.monitor.base_monitor.monitor_schain_container',
- monitor_schain_container_mock
- ):
- assert not test_monitor.restart_skaled_container()
- assert test_monitor.restart_skaled_container()
- test_monitor.cleanup_schain_docker_entity()
-
-
-@pytest.fixture
-def cleanup_ima(dutils):
- try:
- yield
- finally:
- remove_ima_container(test_monitor.name, dutils=test_monitor.dutils)
- dutils.remove_container()
-
-
-def test_base_monitor_ima_container_new_chain(
- test_monitor,
- schain_config,
- predeployed_ima,
- cleanup_ima_containers,
- dutils
-):
- test_monitor.config_dir()
- test_monitor.ima_data.linked = True
- with mock.patch(
- 'core.schains.monitor.containers.run_ima_container',
- run_ima_container_mock
- ):
- test_monitor.ima_container()
- containers = dutils.get_all_ima_containers(all=True)
- assert len(containers) == 1
- container_name = containers[0].name
- assert container_name == f'skale_ima_{test_monitor.name}'
- image = dutils.get_container_image_name(container_name)
- assert image == 'skalenetwork/ima:2.0.0-develop.3'
-
-
-@mock.patch('core.schains.monitor.containers.run_ima_container', run_ima_container_mock)
-def test_base_monitor_ima_container_old_chain(
- test_monitor,
- schain_config,
- predeployed_ima,
- cleanup_ima_containers,
- dutils
-):
- test_monitor.config_dir()
- test_monitor.ima_data.linked = True
-
- ts = int(time.time())
- mts = ts + 3600
- with mock.patch('core.schains.monitor.base_monitor.get_ima_migration_ts', return_value=mts):
- test_monitor.ima_container()
- containers = dutils.get_all_ima_containers(all=True)
- assert len(containers) == 1
- assert containers[0].name == f'skale_ima_{test_monitor.name}'
- container_name = containers[0].name
- assert container_name == f'skale_ima_{test_monitor.name}'
- image = dutils.get_container_image_name(container_name)
- assert image == 'skalenetwork/ima:1.3.4-beta.5'
- assert dutils.pulled('skalenetwork/ima:2.0.0-develop.3')
-
- mts = ts - 5
- with mock.patch('core.schains.monitor.base_monitor.get_ima_migration_ts', return_value=mts):
- test_monitor.ima_container()
- containers = dutils.get_all_ima_containers(all=True)
- assert len(containers) == 1
- container_name = containers[0].name
- assert container_name == f'skale_ima_{test_monitor.name}'
- image = dutils.get_container_image_name(container_name)
- assert image == 'skalenetwork/ima:2.0.0-develop.3'
-
-
-def test_base_monitor_ima_container_not_linked(
- schain_db,
- _schain_name,
- node_config,
- skale,
- dutils
-):
- schain_record = SChainRecord.get_by_name(_schain_name)
- schain_checks = SChainChecks(
- _schain_name,
- node_config.id,
- schain_record=schain_record,
- rule_controller=get_test_rule_controller(_schain_name),
- dutils=dutils
- )
- ima_data = ImaData(False, '0x1')
- test_monitor = BaseTestMonitor(
- skale=skale,
- ima_data=ima_data,
- schain={'name': schain_db, 'partOfNode': 0, 'generation': 0},
- node_config=node_config,
- rotation_data={'rotation_id': 0, 'leaving_node': 1},
- checks=schain_checks,
- rule_controller=get_test_rule_controller(_schain_name),
- dutils=dutils
- )
-
- test_monitor.config_dir()
- assert not test_monitor.ima_container()
- assert not test_monitor.ima_container()
- remove_ima_container(test_monitor.name, dutils=test_monitor.dutils)
-
-
-def test_base_monitor_cleanup(test_monitor):
- test_monitor.volume()
- with mock.patch(
- 'core.schains.monitor.base_monitor.monitor_schain_container',
- monitor_schain_container_mock
- ):
- test_monitor.skaled_container()
-
- assert test_monitor.checks.volume.status
- assert test_monitor.checks.skaled_container.status
- test_monitor.cleanup_schain_docker_entity()
- assert not test_monitor.checks.volume.status
- assert not test_monitor.checks.skaled_container.status
-
-
-def test_schain_finish_ts(skale, schain_on_contracts):
- name = schain_on_contracts
- max_node_id = skale.nodes.get_nodes_number() - 1
- assert skale.node_rotation.get_schain_finish_ts(max_node_id, name) is None
-
-
-def test_display_skaled_logs(skale, test_monitor, _schain_name):
- test_monitor.volume()
- with mock.patch(
- 'core.schains.monitor.base_monitor.monitor_schain_container',
- monitor_schain_container_mock
- ):
- test_monitor.skaled_container()
- test_monitor.display_skaled_logs()
-
-
-def test_reloaded_schain_containers(
- skale,
- test_monitor,
- schain_db,
- cleanup_schain_containers,
- cleanup_ima_containers,
- cert_key_pair,
- schain_config,
- dutils
-):
- name = schain_db
-
- test_monitor.volume()
- test_monitor.recreated_schain_containers()
- schain_container = f'skale_schain_{name}'
- ima_container = f'skale_ima_{name}'
- dutils.wait_for_container_creation(schain_container)
- dutils.wait_for_container_creation(ima_container)
- info = dutils.get_info(schain_container)
- print(info)
- skaled_iso_created_time = info['stats']['Created'].split('.')[0]
- skaled_created_ts = int(datetime.fromisoformat(skaled_iso_created_time).timestamp())
-
- info = dutils.get_info(ima_container)
- ima_iso_created_time = info['stats']['Created'].split('.')[0]
- ima_created_ts = int(datetime.fromisoformat(ima_iso_created_time).timestamp())
-
- test_monitor.recreated_schain_containers()
- dutils.wait_for_container_creation(schain_container)
- dutils.wait_for_container_creation(ima_container)
-
- info = dutils.get_info(schain_container)
- skaled_iso_time = info['stats']['Created'].split('.')[0]
- skaled_ts = int(datetime.fromisoformat(skaled_iso_time).timestamp())
-
- info = dutils.get_info(ima_container)
- ima_iso_time = info['stats']['Created'].split('.')[0]
- ima_ts = int(datetime.fromisoformat(ima_iso_time).timestamp())
-
- assert skaled_ts > skaled_created_ts
- assert ima_ts > ima_created_ts
diff --git a/tests/schains/monitor/config_monitor_test.py b/tests/schains/monitor/config_monitor_test.py
new file mode 100644
index 000000000..5fa5a823c
--- /dev/null
+++ b/tests/schains/monitor/config_monitor_test.py
@@ -0,0 +1,90 @@
+import glob
+import os
+
+import pytest
+
+from core.schains.checks import ConfigChecks
+from core.schains.config.directory import schain_config_dir
+
+from core.schains.monitor.action import ConfigActionManager
+from core.schains.monitor.config_monitor import RegularConfigMonitor
+
+from web.models.schain import SChainRecord
+
+from tests.utils import CONFIG_STREAM
+
+
+@pytest.fixture
+def rotation_data(schain_db, skale):
+ return skale.node_rotation.get_rotation(schain_db)
+
+
+@pytest.fixture
+def config_checks(
+ schain_db,
+ skale,
+ node_config,
+ schain_on_contracts,
+ rotation_data,
+ estate
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ return ConfigChecks(
+ schain_name=name,
+ node_id=node_config.id,
+ schain_record=schain_record,
+ rotation_id=rotation_data['rotation_id'],
+ stream_version=CONFIG_STREAM,
+ estate=estate
+ )
+
+
+@pytest.fixture
+def config_am(
+ schain_db,
+ skale,
+ node_config,
+ schain_on_contracts,
+ predeployed_ima,
+ secret_key,
+ config_checks,
+ estate
+):
+ name = schain_db
+ rotation_data = skale.node_rotation.get_rotation(name)
+ schain = skale.schains.get_by_name(name)
+
+ am = ConfigActionManager(
+ skale=skale,
+ schain=schain,
+ node_config=node_config,
+ rotation_data=rotation_data,
+ stream_version=CONFIG_STREAM,
+ checks=config_checks,
+ estate=estate
+ )
+ am.dkg = lambda s: True
+ return am
+
+
+@pytest.fixture
+def regular_config_monitor(config_am, config_checks):
+ return RegularConfigMonitor(
+ action_manager=config_am,
+ checks=config_checks
+ )
+
+
+def test_regular_config_monitor(schain_db, regular_config_monitor, rotation_data):
+ name = schain_db
+ rotation_id = rotation_data['rotation_id']
+ regular_config_monitor.run()
+ config_dir = schain_config_dir(name)
+
+ pattern = os.path.join(
+ config_dir,
+ f'schain_{name}_{rotation_id}_*.json'
+ )
+ filenames = glob.glob(pattern)
+ assert os.path.isfile(filenames[0])
diff --git a/tests/schains/monitor/containers.py b/tests/schains/monitor/containers_test.py
similarity index 62%
rename from tests/schains/monitor/containers.py
rename to tests/schains/monitor/containers_test.py
index 284d44b13..b8e806f06 100644
--- a/tests/schains/monitor/containers.py
+++ b/tests/schains/monitor/containers_test.py
@@ -4,7 +4,7 @@
from core.schains.runner import is_container_exists
from web.models.schain import upsert_schain_record
-from tests.schains.monitor.main_test import run_exited_schain_container
+from tests.utils import run_custom_schain_container
def test_monitor_schain_container(
@@ -37,7 +37,6 @@ def test_monitor_schain_container_exit_time_reached(
schain_record = upsert_schain_record(schain_db)
schain = {'name': schain_db, 'partOfNode': 0, 'generation': 0}
- run_exited_schain_container(dutils, schain_db, 0)
with mock.patch('core.schains.monitor.containers.is_volume_exists', return_value=True):
schain_record.set_failed_rpc_count(100)
schain_record.set_restart_count(100)
@@ -47,31 +46,18 @@ def test_monitor_schain_container_exit_time_reached(
skaled_status_exit_time_reached,
dutils=dutils
)
+ assert len(dutils.get_all_schain_containers()) == 0
assert schain_record.restart_count == 0
assert schain_record.failed_rpc_count == 0
-
-def test_monitor_schain_container_cleanup(
- schain_db,
- skaled_status_repair,
- dutils,
- ssl_folder,
- schain_config,
- cleanup_schain_containers
-):
- schain_record = upsert_schain_record(schain_db)
- schain = {'name': schain_db, 'partOfNode': 0, 'generation': 0}
-
- run_exited_schain_container(dutils, schain_db, 0)
- with mock.patch('core.schains.monitor.containers.is_volume_exists', return_value=True):
- schain_record.set_failed_rpc_count(100)
- schain_record.set_restart_count(100)
monitor_schain_container(
schain,
schain_record,
- skaled_status_repair,
+ skaled_status_exit_time_reached,
+ abort_on_exit=False,
dutils=dutils
)
+ assert len(dutils.get_all_schain_containers()) == 1
assert schain_record.restart_count == 0
assert schain_record.failed_rpc_count == 0
@@ -86,8 +72,9 @@ def test_monitor_schain_container_ec(
):
schain_record = upsert_schain_record(schain_db)
schain = {'name': schain_db, 'partOfNode': 0, 'generation': 0}
+ schain_name = schain_db
- run_exited_schain_container(dutils, schain_db, 123)
+ run_custom_schain_container(dutils, schain_name, entrypoint=['sh', 'exit', '1'])
with mock.patch('core.schains.monitor.containers.is_volume_exists', return_value=True):
schain_record.set_failed_rpc_count(100)
schain_record.set_restart_count(0)
@@ -99,28 +86,3 @@ def test_monitor_schain_container_ec(
)
assert schain_record.restart_count == 1
assert schain_record.failed_rpc_count == 0
-
-
-def test_monitor_schain_container_ec_0(
- schain_db,
- skaled_status,
- dutils,
- ssl_folder,
- schain_config,
- cleanup_schain_containers
-):
- schain_record = upsert_schain_record(schain_db)
- schain = {'name': schain_db, 'partOfNode': 0, 'generation': 0}
-
- run_exited_schain_container(dutils, schain_db, 0)
- with mock.patch('core.schains.monitor.containers.is_volume_exists', return_value=True):
- schain_record.set_failed_rpc_count(100)
- schain_record.set_restart_count(0)
- monitor_schain_container(
- schain,
- schain_record,
- skaled_status,
- dutils=dutils
- )
- assert schain_record.restart_count == 0
- assert schain_record.failed_rpc_count == 100
diff --git a/tests/schains/monitor/main_test.py b/tests/schains/monitor/main_test.py
index 41ead27eb..3c094ab4b 100644
--- a/tests/schains/monitor/main_test.py
+++ b/tests/schains/monitor/main_test.py
@@ -1,173 +1,16 @@
-import os
import mock
+from concurrent.futures import ThreadPoolExecutor
import pytest
-from core.schains.checks import SChainChecks, CheckRes
-from core.schains.config.directory import schain_config_dir
from core.schains.firewall.types import IpRange
-from core.schains.monitor.main import (
- run_monitor_for_schain, get_monitor_type, BackupMonitor, RepairMonitor, PostRotationMonitor,
- RotationMonitor, RegularMonitor, ReloadMonitor
-)
-from core.schains.runner import get_container_info
from core.schains.firewall.utils import get_sync_agent_ranges
+from core.schains.monitor.main import run_monitor_for_schain
+from core.schains.task import Task
-from tools.configs.containers import SCHAIN_CONTAINER
from tools.helper import is_node_part_of_chain
from web.models.schain import upsert_schain_record
-from tests.schains.monitor.base_monitor_test import BaseTestMonitor, CrashingTestMonitor
-
-
-class SChainChecksMock(SChainChecks):
- @property
- def skaled_container(self) -> CheckRes:
- return CheckRes(True)
-
-
-class SChainChecksMockBad(SChainChecks):
- @property
- def skaled_container(self) -> CheckRes:
- return CheckRes(False)
-
-
-@pytest.fixture
-def checks(
- schain_db,
- _schain_name,
- rule_controller,
- node_config,
- ima_data,
- dutils
-):
- schain_record = upsert_schain_record(schain_db)
- return SChainChecksMock(
- _schain_name,
- node_config.id,
- schain_record,
- rule_controller=rule_controller,
- dutils=dutils
- )
-
-
-@pytest.fixture
-def bad_checks(
- schain_db,
- _schain_name,
- rule_controller,
- node_config,
- ima_data,
- dutils
-):
- schain_record = upsert_schain_record(schain_db)
- return SChainChecksMockBad(
- _schain_name,
- node_config.id,
- schain_record,
- rule_controller=rule_controller,
- dutils=dutils
- )
-
-
-def run_exited_schain_container(dutils, schain_name: str, exit_code: int):
- image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain_name)
- dutils.safe_rm(container_name)
- dutils.run_container(
- image_name=image_name,
- name=container_name,
- entrypoint=f'bash -c "exit {exit_code}"'
- )
-
-
-def test_is_backup_mode(schain_db, checks, skaled_status):
- schain_record = upsert_schain_record(schain_db)
- assert get_monitor_type(schain_record, checks, False, skaled_status) != BackupMonitor
- schain_record.set_new_schain(False)
- with mock.patch('core.schains.monitor.main.BACKUP_RUN', True):
- assert get_monitor_type(schain_record, checks, False, skaled_status) == BackupMonitor
-
-
-def test_is_repair_mode(schain_db, checks, skaled_status):
- schain_record = upsert_schain_record(schain_db)
-
- assert get_monitor_type(schain_record, checks, False, skaled_status) != RepairMonitor
- schain_record.set_repair_mode(True)
- assert get_monitor_type(schain_record, checks, False, skaled_status) == RepairMonitor
-
- schain_record.set_repair_mode(False)
- assert get_monitor_type(schain_record, checks, False, skaled_status) != RepairMonitor
-
-
-def test_is_repair_mode_skaled_status(schain_db, checks, bad_checks, skaled_status_repair):
- schain_record = upsert_schain_record(schain_db)
- schain_record.set_repair_mode(False)
- assert get_monitor_type(
- schain_record, checks, False, skaled_status_repair) != RepairMonitor
- assert get_monitor_type(
- schain_record, bad_checks, False, skaled_status_repair) == RepairMonitor
-
-
-def test_not_post_rotation_mode(schain_db, checks, skaled_status):
- schain_record = upsert_schain_record(schain_db)
- assert get_monitor_type(schain_record, checks, False, skaled_status) != PostRotationMonitor
-
-
-def test_is_post_rotation_mode(schain_db, bad_checks, skaled_status_exit_time_reached):
- schain_record = upsert_schain_record(schain_db)
- schain_dir_path = schain_config_dir(schain_db)
- os.makedirs(schain_dir_path, exist_ok=True)
- assert get_monitor_type(
- schain_record, bad_checks, False, skaled_status_exit_time_reached) == PostRotationMonitor
-
-
-def test_is_rotation_mode(schain_db, checks, skaled_status):
- schain_record = upsert_schain_record(schain_db)
- assert get_monitor_type(schain_record, checks, False, skaled_status) != RotationMonitor
- assert get_monitor_type(schain_record, checks, True, skaled_status) == RotationMonitor
-
-
-def test_is_regular_mode(schain_db, checks, skaled_status):
- schain_record = upsert_schain_record(schain_db)
- assert get_monitor_type(schain_record, checks, True, skaled_status) != RegularMonitor
- assert get_monitor_type(schain_record, checks, False, skaled_status) == RegularMonitor
-
-
-def test_not_is_reload_mode(schain_db, checks, bad_checks, skaled_status):
- schain_record = upsert_schain_record(schain_db)
- assert get_monitor_type(schain_record, checks, False, skaled_status) != ReloadMonitor
- assert get_monitor_type(schain_record, bad_checks, False, skaled_status) != ReloadMonitor
-
-
-def test_is_reload_mode(schain_db, checks, bad_checks, skaled_status_reload):
- schain_record = upsert_schain_record(schain_db)
- assert get_monitor_type(schain_record, checks, False, skaled_status_reload) != ReloadMonitor
- schain_record.set_needs_reload(True)
- assert get_monitor_type(schain_record, bad_checks, False, skaled_status_reload) == ReloadMonitor
-
-
-def test_run_monitor_for_schain(skale, skale_ima, node_config, schain_db, dutils):
- with mock.patch('core.schains.monitor.main.RegularMonitor', CrashingTestMonitor), \
- mock.patch('core.schains.monitor.main.is_node_part_of_chain', return_value=True):
- assert not run_monitor_for_schain(
- skale,
- skale_ima,
- node_config,
- {'name': schain_db, 'partOfNode': 0, 'generation': 0},
- once=True,
- dutils=dutils
- )
- with mock.patch('core.schains.monitor.main.RegularMonitor', BaseTestMonitor):
- assert run_monitor_for_schain(
- skale,
- skale_ima,
- node_config,
- {'name': schain_db, 'partOfNode': 0, 'generation': 0},
- once=True,
- dutils=dutils
- )
-
@pytest.fixture
def sync_ranges(skale):
@@ -204,3 +47,47 @@ def test_is_node_part_of_chain(skale, schain_on_contracts, node_config):
node_exist_node = 10000
chain_on_node = is_node_part_of_chain(skale, schain_on_contracts, node_exist_node)
assert not chain_on_node
+
+
+def test_run_monitor_for_schain(
+ skale,
+ skale_ima,
+ schain_on_contracts,
+ node_config,
+ schain_db,
+ dutils
+):
+ with mock.patch('core.schains.monitor.main.keep_tasks_running') as keep_tasks_running_mock:
+ run_monitor_for_schain(
+ skale,
+ skale_ima,
+ node_config,
+ schain={'name': schain_db, 'partOfNode': 0, 'generation': 0},
+ dutils=dutils,
+ once=True
+ )
+ assert isinstance(keep_tasks_running_mock.call_args[0][0], ThreadPoolExecutor)
+ assert isinstance(keep_tasks_running_mock.call_args[0][1][0], Task)
+ assert isinstance(keep_tasks_running_mock.call_args[0][1][1], Task)
+ assert keep_tasks_running_mock.call_args[0][2] == [None, None]
+
+
+def test_run_monitor_for_schain_left(
+ skale,
+ skale_ima,
+ node_config,
+ schain_db,
+ dutils
+):
+ schain_not_exists = 'not-on-node'
+ upsert_schain_record(schain_not_exists)
+ with mock.patch('core.schains.monitor.main.keep_tasks_running') as keep_tasks_running_mock:
+ run_monitor_for_schain(
+ skale,
+ skale_ima,
+ node_config,
+ schain={'name': schain_not_exists, 'partOfNode': 0, 'generation': 0},
+ dutils=dutils,
+ once=True
+ )
+ keep_tasks_running_mock.assert_not_called()
diff --git a/tests/schains/monitor/regular_monitor_test.py b/tests/schains/monitor/regular_monitor_test.py
deleted file mode 100644
index 3395adab0..000000000
--- a/tests/schains/monitor/regular_monitor_test.py
+++ /dev/null
@@ -1,117 +0,0 @@
-import logging
-import platform
-
-import mock
-
-from skale.schain_config.generator import get_nodes_for_schain
-from skale.wallets import SgxWallet
-from skale.utils.helper import ip_from_bytes
-
-from core.schains.runner import get_container_name
-from core.schains.checks import SChainChecks
-from core.schains.monitor import RegularMonitor
-from core.schains.ima import ImaData
-
-from tools.configs import SGX_CERTIFICATES_FOLDER, SGX_SERVER_URL
-from tools.configs.containers import SCHAIN_CONTAINER
-
-from web.models.schain import SChainRecord
-
-from tests.dkg_utils import safe_run_dkg_mock, get_bls_public_keys
-from tests.utils import (
- alter_schain_config,
- get_test_rule_controller,
- no_schain_artifacts,
- upsert_schain_record_with_config
-)
-
-
-logger = logging.getLogger(__name__)
-
-
-def test_regular_monitor(
- schain_db,
- skale,
- node_config,
- skale_ima,
- dutils,
- ssl_folder,
- schain_on_contracts,
- predeployed_ima
-):
- schain_name = schain_on_contracts
- upsert_schain_record_with_config(schain_name)
-
- schain = skale.schains.get_by_name(schain_name)
- nodes = get_nodes_for_schain(skale, schain_name)
-
- # not using rule_controller fixture to avoid config generation
- rc = get_test_rule_controller(name=schain_name)
-
- sgx_wallet = SgxWallet(
- web3=skale.web3,
- sgx_endpoint=SGX_SERVER_URL,
- path_to_cert=SGX_CERTIFICATES_FOLDER
- )
-
- node_config.id = nodes[0]['id']
- node_config.ip = ip_from_bytes(nodes[0]['ip'])
- node_config.sgx_key_name = sgx_wallet.key_name
-
- schain_record = SChainRecord.get_by_name(schain_name)
- schain_checks = SChainChecks(
- schain_name,
- node_config.id,
- schain_record=schain_record,
- rule_controller=rc,
- dutils=dutils
- )
- ima_data = ImaData(False, '0x1')
- test_monitor = RegularMonitor(
- skale=skale,
- ima_data=ima_data,
- schain=schain,
- node_config=node_config,
- rotation_data={'rotation_id': 0, 'leaving_node': 1},
- checks=schain_checks,
- rule_controller=rc,
- dutils=dutils
- )
-
- with no_schain_artifacts(schain['name'], dutils):
- with mock.patch(
- 'core.schains.monitor.base_monitor.safe_run_dkg',
- safe_run_dkg_mock
- ), mock.patch(
- 'skale.schain_config.rotation_history._compose_bls_public_key_info',
- return_value=get_bls_public_keys()
- ):
- test_monitor.run()
-
- assert schain_checks.config_dir.status
- assert schain_checks.dkg.status
- assert schain_checks.config.status
- assert schain_checks.volume.status
- if not schain_checks.skaled_container.status:
- container_name = get_container_name(SCHAIN_CONTAINER, schain['name'])
- print(dutils.display_container_logs(container_name))
- assert schain_checks.skaled_container.status
- assert not schain_checks.ima_container.status
-
- test_monitor.cleanup_schain_docker_entity()
- alter_schain_config(schain_name, sgx_wallet.public_key)
-
- with mock.patch(
- 'skale.schain_config.rotation_history._compose_bls_public_key_info',
- return_value=get_bls_public_keys()
- ):
- test_monitor.run()
-
- assert schain_checks.volume.status
- assert schain_checks.skaled_container.status
-
- if platform.system() != 'Darwin': # not working due to the macOS networking in Docker
- assert schain_checks.rpc.status
- assert schain_checks.blocks.status
-
- test_monitor.cleanup_schain_docker_entity()
diff --git a/tests/schains/monitor/reload_monitor_test.py b/tests/schains/monitor/reload_monitor_test.py
deleted file mode 100644
index a6dd69a21..000000000
--- a/tests/schains/monitor/reload_monitor_test.py
+++ /dev/null
@@ -1,148 +0,0 @@
-import logging
-import platform
-
-import mock
-
-from skale.schain_config.generator import get_nodes_for_schain
-from skale.wallets import SgxWallet
-from skale.utils.helper import ip_from_bytes
-
-from core.schains.checks import SChainChecks
-from core.schains.ima import ImaData
-from core.schains.monitor import RegularMonitor, ReloadMonitor
-from core.schains.runner import get_container_info, get_container_name
-
-from tools.configs import (
- SGX_CERTIFICATES_FOLDER,
- SGX_SERVER_URL
-)
-from tools.configs.containers import SCHAIN_CONTAINER
-
-from web.models.schain import SChainRecord
-
-from tests.dkg_utils import safe_run_dkg_mock, get_bls_public_keys
-from tests.utils import (
- alter_schain_config,
- get_test_rule_controller,
- no_schain_artifacts,
- upsert_schain_record_with_config
-)
-
-
-logger = logging.getLogger(__name__)
-
-
-def test_reload_monitor(
- schain_db,
- skale,
- node_config,
- skale_ima,
- dutils,
- ssl_folder,
- schain_on_contracts,
- predeployed_ima
-):
- schain_name = schain_on_contracts
- upsert_schain_record_with_config(schain_name)
- schain = skale.schains.get_by_name(schain_name)
- nodes = get_nodes_for_schain(skale, schain_name)
- image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER,
- schain_name
- )
-
- # not using rule_controller fixture to avoid config generation
- rc = get_test_rule_controller(name=schain_name)
-
- sgx_wallet = SgxWallet(
- web3=skale.web3,
- sgx_endpoint=SGX_SERVER_URL,
- path_to_cert=SGX_CERTIFICATES_FOLDER
- )
-
- node_config.id = nodes[0]['id']
- node_config.ip = ip_from_bytes(nodes[0]['ip'])
- node_config.sgx_key_name = sgx_wallet.key_name
-
- schain_record = SChainRecord.get_by_name(schain_name)
- schain_record.set_needs_reload(True)
-
- schain_checks = SChainChecks(
- schain_name,
- node_config.id,
- schain_record=schain_record,
- rule_controller=rc,
- dutils=dutils
- )
- ima_data = ImaData(False, '0x1')
- reload_monitor = ReloadMonitor(
- skale=skale,
- ima_data=ima_data,
- schain=schain,
- node_config=node_config,
- rotation_data={'rotation_id': 0, 'leaving_node': 1},
- checks=schain_checks,
- rule_controller=rc,
- dutils=dutils
- )
- regular_monitor = RegularMonitor(
- skale=skale,
- ima_data=ima_data,
- schain=schain,
- node_config=node_config,
- rotation_data={'rotation_id': 0, 'leaving_node': 1},
- checks=schain_checks,
- rule_controller=rc,
- dutils=dutils
- )
-
- schain_record.set_needs_reload(True)
-
- with no_schain_artifacts(schain['name'], dutils):
- reload_monitor.config_dir()
-
- with mock.patch(
- 'skale.schain_config.rotation_history._compose_bls_public_key_info',
- return_value=get_bls_public_keys()
- ):
- reload_monitor.run()
-
- schain_record = SChainRecord.get_by_name(schain_name)
- assert schain_record.needs_reload is False
- info = dutils.get_info(container_name)
- assert info['status'] == 'not_found'
-
- with mock.patch(
- 'core.schains.monitor.base_monitor.safe_run_dkg',
- safe_run_dkg_mock
- ), mock.patch(
- 'skale.schain_config.rotation_history._compose_bls_public_key_info',
- return_value=get_bls_public_keys()
- ):
- regular_monitor.run()
- alter_schain_config(schain_name, sgx_wallet.public_key)
-
- state = dutils.get_info(container_name)['stats']['State']
- assert state['Status'] == 'running'
- initial_started_at = state['StartedAt']
-
- reload_monitor.run()
-
- state = dutils.get_info(container_name)['stats']['State']
- assert state['Status'] == 'running'
- assert state['StartedAt'] > initial_started_at
-
- assert schain_record.needs_reload is False
- assert schain_checks.config_dir.status
- assert schain_checks.dkg.status
- assert schain_checks.config.status
- assert schain_checks.volume.status
- if not schain_checks.skaled_container.status:
- container_name = get_container_name(SCHAIN_CONTAINER, schain['name'])
- print(dutils.display_container_logs(container_name))
- assert schain_checks.skaled_container.status
- assert not schain_checks.ima_container.status
-
- if platform.system() != 'Darwin': # not working due to the macOS networking in Docker # noqa
- assert schain_checks.rpc.status
- assert schain_checks.blocks.status
diff --git a/tests/schains/monitor/rotation_test.py b/tests/schains/monitor/rotation_test.py
deleted file mode 100644
index 808dc5e5a..000000000
--- a/tests/schains/monitor/rotation_test.py
+++ /dev/null
@@ -1,232 +0,0 @@
-import mock
-import pytest
-
-from core.schains.monitor.rotation_monitor import RotationMonitor
-from core.schains.checks import SChainChecks
-
-from web.models.schain import SChainRecord
-
-from tests.utils import get_test_rule_controller
-
-
-DEFAULT_ROTATION_DATA = {
- 'rotation_id': 1,
- 'freeze_until': 12345678,
- 'new_node': 2999,
- 'leaving_node': 1999
-}
-
-
-@pytest.fixture
-def new_checks(schain_db, _schain_name, node_config, ima_data, dutils):
- schain_record = SChainRecord.get_by_name(schain_db)
- return SChainChecks(
- schain_db,
- node_config.id,
- schain_record=schain_record,
- rule_controller=get_test_rule_controller(_schain_name),
- dutils=dutils
- )
-
-
-def get_rotation_monitor(
- skale,
- name,
- ima_data,
- node_config,
- schain_db,
- dutils,
- new_checks,
- rotation_data,
- rule_controller
-):
- return RotationMonitor(
- skale=skale,
- ima_data=ima_data,
- schain={'name': name, 'partOfNode': 0, 'generation': 0},
- node_config=node_config,
- rotation_data=rotation_data,
- checks=new_checks,
- rule_controller=get_test_rule_controller(name),
- dutils=dutils
- )
-
-
-def test_is_new_node_multiple_new_nodes(
- node_config,
- skale,
- _schain_name,
- ima_data,
- schain_db,
- dutils,
- new_checks
-):
- test_monitor = get_rotation_monitor(
- skale=skale,
- name=_schain_name,
- ima_data=ima_data,
- schain_db=schain_db,
- node_config=node_config,
- rotation_data=DEFAULT_ROTATION_DATA,
- new_checks=new_checks,
- rule_controller=get_test_rule_controller(_schain_name),
- dutils=dutils
- )
- with mock.patch('core.schains.monitor.rotation_monitor.get_previous_schain_groups'):
- with mock.patch(
- 'core.schains.monitor.rotation_monitor.get_new_nodes_list',
- return_value=[node_config.id]
- ):
- assert test_monitor.get_rotation_mode_func() == test_monitor.new_node
- with mock.patch(
- 'core.schains.monitor.rotation_monitor.get_new_nodes_list', return_value=[]):
- assert test_monitor.get_rotation_mode_func() != test_monitor.new_node
-
-
-def test_is_new_node(
- node_config,
- schain_config,
- _schain_name,
- skale,
- ima_data,
- schain_db,
- dutils,
- new_checks
-):
- rotation_data_new_node = {
- 'rotation_id': 1,
- 'freeze_until': 12345678,
- 'new_node': node_config.id,
- 'leaving_node': 1999
- }
- with mock.patch('core.schains.monitor.rotation_monitor.get_previous_schain_groups'), \
- mock.patch('core.schains.monitor.rotation_monitor.get_new_nodes_list'):
- test_monitor = get_rotation_monitor(
- skale=skale,
- name=_schain_name,
- ima_data=ima_data,
- schain_db=schain_db,
- node_config=node_config,
- rotation_data=rotation_data_new_node,
- new_checks=new_checks,
- rule_controller=get_test_rule_controller(_schain_name),
- dutils=dutils
- )
- assert test_monitor.get_rotation_mode_func() == test_monitor.new_node
-
- test_monitor = get_rotation_monitor(
- skale=skale,
- name=_schain_name,
- ima_data=ima_data,
- schain_db=schain_db,
- node_config=node_config,
- rotation_data=DEFAULT_ROTATION_DATA,
- new_checks=new_checks,
- rule_controller=get_test_rule_controller(_schain_name),
- dutils=dutils
- )
- assert test_monitor.get_rotation_mode_func() != test_monitor.new_node
-
-
-def test_is_leaving_node(
- node_config,
- schain_config,
- skale,
- _schain_name,
- ima_data,
- schain_db,
- dutils,
- new_checks
-):
- rotation_data_leaving_node = {
- 'rotation_id': 1,
- 'freeze_until': 12345678,
- 'new_node': 9999,
- 'leaving_node': node_config.id,
- }
- with mock.patch('core.schains.monitor.rotation_monitor.get_previous_schain_groups'), \
- mock.patch('core.schains.monitor.rotation_monitor.get_new_nodes_list'):
- test_monitor = get_rotation_monitor(
- skale=skale,
- name=_schain_name,
- ima_data=ima_data,
- schain_db=schain_db,
- node_config=node_config,
- rotation_data=rotation_data_leaving_node,
- new_checks=new_checks,
- rule_controller=get_test_rule_controller(_schain_name),
- dutils=dutils
- )
- assert test_monitor.get_rotation_mode_func() == test_monitor.leaving_node
-
- test_monitor = get_rotation_monitor(
- skale=skale,
- name=_schain_name,
- ima_data=ima_data,
- schain_db=schain_db,
- node_config=node_config,
- rotation_data=DEFAULT_ROTATION_DATA,
- new_checks=new_checks,
- rule_controller=get_test_rule_controller(_schain_name),
- dutils=dutils
- )
- assert test_monitor.get_rotation_mode_func() != test_monitor.leaving_node
-
-
-def test_is_staying_node(
- node_config,
- skale,
- _schain_name,
- schain_config,
- ima_data,
- schain_db,
- rule_controller,
- dutils,
- new_checks
-):
- test_monitor = get_rotation_monitor(
- skale=skale,
- name=_schain_name,
- ima_data=ima_data,
- schain_db=schain_db,
- node_config=node_config,
- rotation_data=DEFAULT_ROTATION_DATA,
- new_checks=new_checks,
- rule_controller=rule_controller,
- dutils=dutils
- )
- with mock.patch('core.schains.monitor.rotation_monitor.get_previous_schain_groups'), \
- mock.patch('core.schains.monitor.rotation_monitor.get_new_nodes_list'):
- assert test_monitor.get_rotation_mode_func() == test_monitor.staying_node
-
-
-@pytest.mark.skip(reason="test should be improved")
-def test_rotation_request(
- node_config,
- skale,
- _schain_name,
- schain_config,
- ima_data,
- schain_db,
- rule_controller,
- dutils,
- new_checks
-):
- rotation_data_leaving_node = {
- 'rotation_id': 1,
- 'freeze_until': 12345678,
- 'new_node': 9999,
- 'leaving_node': node_config.id,
- }
- test_monitor = get_rotation_monitor(
- skale=skale,
- name=_schain_name,
- ima_data=ima_data,
- schain_db=schain_db,
- node_config=node_config,
- rotation_data=rotation_data_leaving_node,
- new_checks=new_checks,
- rule_controller=rule_controller,
- dutils=dutils
- )
- test_monitor.rotation_request()
diff --git a/tests/schains/monitor/skaled_monitor_test.py b/tests/schains/monitor/skaled_monitor_test.py
new file mode 100644
index 000000000..57beb9e58
--- /dev/null
+++ b/tests/schains/monitor/skaled_monitor_test.py
@@ -0,0 +1,540 @@
+import datetime
+import os
+import time
+from unittest import mock
+
+import freezegun
+import pytest
+
+from core.schains.checks import CheckRes, SkaledChecks
+from core.schains.config.directory import schain_config_dir
+from core.schains.monitor.action import SkaledActionManager
+from core.schains.monitor.skaled_monitor import (
+ BackupSkaledMonitor,
+ get_skaled_monitor,
+ NewConfigSkaledMonitor,
+ NewNodeSkaledMonitor,
+ NoConfigSkaledMonitor,
+ RecreateSkaledMonitor,
+ RegularSkaledMonitor,
+ RepairSkaledMonitor,
+ UpdateConfigSkaledMonitor
+)
+from core.schains.runner import get_container_info
+from tools.configs.containers import SCHAIN_CONTAINER, IMA_CONTAINER
+from web.models.schain import SChainRecord
+
+
+CURRENT_TIMESTAMP = 1594903080
+CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP)
+
+
+def run_ima_container_mock(schain: dict, mainnet_chain_id: int, dutils=None):
+ image_name, container_name, _, _ = get_container_info(
+ IMA_CONTAINER, schain['name'])
+ dutils.safe_rm(container_name)
+ dutils.run_container(
+ image_name=image_name,
+ name=container_name,
+ entrypoint='bash -c "while true; do foo; sleep 2; done"'
+ )
+
+
+def monitor_schain_container_mock(
+ schain,
+ schain_record,
+ skaled_status,
+ download_snapshot=False,
+ start_ts=None,
+ dutils=None
+):
+ image_name, container_name, _, _ = get_container_info(
+ SCHAIN_CONTAINER, schain['name'])
+ dutils.safe_rm(container_name)
+ dutils.run_container(
+ image_name=image_name,
+ name=container_name,
+ entrypoint='bash -c "while true; do foo; sleep 2; done"'
+ )
+
+
+@pytest.fixture
+def rotation_data(schain_db, skale):
+ return skale.node_rotation.get_rotation(schain_db)
+
+
+@pytest.fixture
+def skaled_checks(
+ schain_db,
+ skale,
+ rule_controller,
+ dutils
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ return SkaledChecks(
+ schain_name=name,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ dutils=dutils
+ )
+
+
+@pytest.fixture
+def skaled_am(
+ schain_db,
+ skale,
+ node_config,
+ rule_controller,
+ schain_on_contracts,
+ predeployed_ima,
+ rotation_data,
+ secret_key,
+ ssl_folder,
+ dutils,
+ skaled_checks
+):
+ name = schain_db
+ schain = skale.schains.get_by_name(name)
+ return SkaledActionManager(
+ schain=schain,
+ rule_controller=rule_controller,
+ node_config=node_config,
+ checks=skaled_checks,
+ dutils=dutils
+ )
+
+
+class SkaledChecksNoConfig(SkaledChecks):
+ @property
+ def config(self) -> CheckRes:
+ return CheckRes(False)
+
+
+@pytest.fixture
+def skaled_checks_no_config(
+ schain_db,
+ skale,
+ rule_controller,
+ dutils
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ return SkaledChecksNoConfig(
+ schain_name=name,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ dutils=dutils
+ )
+
+
+class SkaledChecksConfigOutdated(SkaledChecks):
+ @property
+ def config_updated(self) -> CheckRes:
+ return CheckRes(False)
+
+ @property
+ def rotation_id_updated(self) -> CheckRes:
+ return CheckRes(False)
+
+
+@pytest.fixture
+def skaled_checks_outdated_config(
+ schain_db,
+ skale,
+ rule_controller,
+ dutils
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ return SkaledChecksConfigOutdated(
+ schain_name=name,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ dutils=dutils
+ )
+
+
+def test_get_skaled_monitor_no_config(skaled_am, skaled_checks_no_config, skaled_status, schain_db):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks_no_config.get_all(),
+ schain_record,
+ skaled_status
+ )
+ assert mon == NoConfigSkaledMonitor
+
+
+def test_get_skaled_monitor_regular_and_backup(skaled_am, skaled_checks, skaled_status, schain_db):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status
+ )
+ assert mon == RegularSkaledMonitor
+
+ schain_record.set_backup_run(True)
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status
+ )
+ assert mon == RegularSkaledMonitor
+
+ schain_record.set_first_run(False)
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status
+ )
+ assert mon == RegularSkaledMonitor
+
+ schain_record.set_new_schain(False)
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status
+ )
+ assert mon == BackupSkaledMonitor
+
+
+def test_get_skaled_monitor_repair(skaled_am, skaled_checks, skaled_status, schain_db):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ schain_record.set_repair_mode(True)
+
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status
+ )
+ assert mon == RepairSkaledMonitor
+
+
+def test_get_skaled_monitor_repair_skaled_status(
+ skaled_am,
+ skaled_checks,
+ schain_db,
+ skaled_status_repair
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status_repair
+ )
+ assert mon == RepairSkaledMonitor
+
+
+class SkaledChecksWithConfig(SkaledChecks):
+ @property
+ def config_updated(self) -> CheckRes:
+ return CheckRes(False)
+
+ @property
+ def config(self) -> CheckRes:
+ return CheckRes(True)
+
+ @property
+ def rotation_id_updated(self) -> CheckRes:
+ return CheckRes(True)
+
+ @property
+ def skaled_container(self) -> CheckRes:
+ return CheckRes(True)
+
+ @property
+ def container(self) -> CheckRes:
+ return CheckRes(True)
+
+
+@pytest.fixture
+def skaled_checks_new_config(
+ schain_db,
+ skale,
+ rule_controller,
+ dutils
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ return SkaledChecksWithConfig(
+ schain_name=name,
+ schain_record=schain_record,
+ rule_controller=rule_controller,
+ dutils=dutils
+ )
+
+
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_get_skaled_monitor_new_config(
+ skale,
+ skaled_am,
+ skaled_checks_new_config,
+ schain_db,
+ skaled_status,
+ node_config,
+ rule_controller,
+ schain_on_contracts,
+ predeployed_ima,
+ rotation_data,
+ secret_keys,
+ ssl_folder,
+ skaled_checks,
+ dutils
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+
+ state = skaled_checks_new_config.get_all()
+ state['rotation_id_updated'] = False
+
+ schain = skale.schains.get_by_name(name)
+
+ with mock.patch(
+ f'{__name__}.SkaledActionManager.upstream_finish_ts',
+ new_callable=mock.PropertyMock
+ ) as finish_ts_mock:
+ finish_ts_mock.return_value = CURRENT_TIMESTAMP - 10
+ skaled_am = SkaledActionManager(
+ schain=schain,
+ rule_controller=rule_controller,
+ node_config=node_config,
+ checks=skaled_checks,
+ dutils=dutils
+ )
+ mon = get_skaled_monitor(
+ skaled_am,
+ state,
+ schain_record,
+ skaled_status
+ )
+ assert mon == RegularSkaledMonitor
+ finish_ts_mock.return_value = CURRENT_TIMESTAMP + 10
+ skaled_am = SkaledActionManager(
+ schain=schain,
+ rule_controller=rule_controller,
+ node_config=node_config,
+ checks=skaled_checks,
+ dutils=dutils
+ )
+ mon = get_skaled_monitor(
+ skaled_am,
+ state,
+ schain_record,
+ skaled_status
+ )
+ assert mon == NewConfigSkaledMonitor
+
+
+@freezegun.freeze_time(CURRENT_DATETIME)
+def test_get_skaled_monitor_new_node(
+ schain_db,
+ skale,
+ node_config,
+ rule_controller,
+ schain_on_contracts,
+ predeployed_ima,
+ rotation_data,
+ secret_key,
+ ssl_folder,
+ skaled_status,
+ skaled_checks,
+ dutils
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ schain = skale.schains.get_by_name(name)
+
+ finish_ts = CURRENT_TIMESTAMP + 10
+ with mock.patch(
+ f'{__name__}.SkaledActionManager.finish_ts',
+ new_callable=mock.PropertyMock
+ ) as finish_ts_mock:
+ skaled_am = SkaledActionManager(
+ schain=schain,
+ rule_controller=rule_controller,
+ node_config=node_config,
+ checks=skaled_checks,
+ dutils=dutils
+ )
+ finish_ts_mock.return_value = finish_ts
+
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status
+ )
+ assert mon == NewNodeSkaledMonitor
+
+
+def test_get_skaled_monitor_update_config(
+ skaled_am,
+ skaled_checks_outdated_config,
+ schain_db,
+ skaled_status_exit_time_reached,
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+ status = skaled_checks_outdated_config.get_all()
+ status['skaled_container'] = False
+
+ mon = get_skaled_monitor(
+ skaled_am,
+ status,
+ schain_record,
+ skaled_status_exit_time_reached
+ )
+ assert mon == UpdateConfigSkaledMonitor
+
+
+def test_get_skaled_monitor_recreate(
+ skaled_am,
+ skaled_checks,
+ schain_db,
+ skaled_status
+):
+ name = schain_db
+ schain_record = SChainRecord.get_by_name(name)
+
+ schain_record.set_needs_reload(True)
+ mon = get_skaled_monitor(
+ skaled_am,
+ skaled_checks.get_all(),
+ schain_record,
+ skaled_status
+ )
+ assert mon == RecreateSkaledMonitor
+
+
+def test_regular_skaled_monitor(
+ skaled_am,
+ skaled_checks,
+ clean_docker,
+ dutils
+):
+ mon = RegularSkaledMonitor(skaled_am, skaled_checks)
+ mon.run()
+ assert skaled_am.rc.is_rules_synced
+ assert dutils.get_vol(skaled_am.name)
+ assert dutils.safe_get_container(f'skale_schain_{skaled_am.name}')
+ assert dutils.safe_get_container(f'skale_ima_{skaled_am.name}')
+
+
+def test_backup_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = BackupSkaledMonitor(skaled_am, skaled_checks)
+ mon.run()
+ assert skaled_am.rc.is_rules_synced
+ assert dutils.get_vol(skaled_am.name)
+ schain_container = dutils.safe_get_container(
+ f'skale_schain_{skaled_am.name}')
+ assert schain_container
+ assert '--download-snapshot' in dutils.get_cmd(schain_container.id)
+ assert dutils.safe_get_container(f'skale_ima_{skaled_am.name}')
+
+
+def test_repair_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = RepairSkaledMonitor(skaled_am, skaled_checks)
+ ts_before = time.time()
+ mon.run()
+ time.sleep(1)
+ assert skaled_am.rc.is_rules_synced
+ assert dutils.get_vol(skaled_am.name)
+
+ assert dutils.get_vol_created_ts(skaled_am.name) > ts_before
+ schain_container = dutils.safe_get_container(
+ f'skale_schain_{skaled_am.name}')
+ assert schain_container
+ assert '--download-snapshot' in dutils.get_cmd(schain_container.id)
+ assert dutils.get_container_created_ts(schain_container.id) > ts_before
+ assert not dutils.safe_get_container(f'skale_ima_{skaled_am.name}')
+
+
+def test_new_config_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = NewConfigSkaledMonitor(skaled_am, skaled_checks)
+ ts = time.time()
+ with mock.patch('core.schains.monitor.action.get_finish_ts_from_latest_upstream',
+ return_value=ts):
+ with mock.patch('core.schains.monitor.action.set_rotation_for_schain') as set_exit_mock:
+ mon.run()
+ set_exit_mock.assert_called_with('http://127.0.0.1:10003', ts)
+ assert skaled_am.rc.is_rules_synced
+ assert dutils.get_vol(skaled_am.name)
+ assert dutils.safe_get_container(f'skale_schain_{skaled_am.name}')
+ assert dutils.safe_get_container(f'skale_ima_{skaled_am.name}')
+
+
+@pytest.mark.skip
+def test_new_config_skaled_monitor_failed_skaled(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = NewConfigSkaledMonitor(skaled_am, skaled_checks)
+ with mock.patch('core.schains.monitor.containers.run_schain_container') \
+ as run_skaled_container_mock:
+ mon.run()
+ assert skaled_am.rc.is_rules_synced
+        run_skaled_container_mock.assert_not_called()
+
+
+def test_recreate_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = RecreateSkaledMonitor(skaled_am, skaled_checks)
+ ts_before = time.time()
+ time.sleep(1)
+ mon.run()
+ schain_container = dutils.safe_get_container(
+ f'skale_schain_{skaled_am.name}')
+ assert schain_container
+ assert dutils.get_container_created_ts(schain_container.id) > ts_before
+
+
+def test_update_config_skaled_monitor(
+ skaled_am,
+ skaled_checks,
+ dutils,
+ clean_docker,
+ upstreams,
+ skaled_status_exit_time_reached
+):
+ name = skaled_checks.name
+ ts_before = time.time()
+ time.sleep(1)
+ mon = UpdateConfigSkaledMonitor(skaled_am, skaled_checks)
+ mon.run()
+ assert dutils.get_vol(name)
+ assert dutils.get_vol_created_ts(name) > ts_before
+ schain_container = dutils.safe_get_container(
+ f'skale_schain_{name}'
+ )
+ assert schain_container
+ assert dutils.get_container_created_ts(schain_container.id) > ts_before
+    assert os.stat(os.path.join(schain_config_dir(name),
+                   f'schain_{name}.json')).st_mtime > ts_before
+
+
+def test_no_config_monitor(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = NoConfigSkaledMonitor(skaled_am, skaled_checks)
+ mon.run()
+ assert not dutils.get_vol(skaled_am.name)
+ assert not dutils.safe_get_container(f'skale_schain_{skaled_am.name}')
+ assert not dutils.safe_get_container(f'skale_ima_{skaled_am.name}')
+
+
+def test_new_node_monitor(skaled_am, skaled_checks, clean_docker, dutils):
+ mon = NewNodeSkaledMonitor(skaled_am, skaled_checks)
+ mon.run()
+ assert skaled_am.rc.is_rules_synced
+ assert dutils.get_vol(skaled_am.name)
+ schain_container = dutils.safe_get_container(
+ f'skale_schain_{skaled_am.name}')
+ assert schain_container
+ assert '--download-snapshot' in dutils.get_cmd(schain_container.id)
diff --git a/tests/schains/runner_test.py b/tests/schains/runner_test.py
index f72e5161d..867ff141d 100644
--- a/tests/schains/runner_test.py
+++ b/tests/schains/runner_test.py
@@ -14,14 +14,13 @@ def json(self):
def test_set_rotation(schain_config):
with mock.patch('core.schains.rotation.requests.post',
new=mock.Mock(return_value=ResponseMock())) as post:
- schain_name = schain_config['skaleConfig']['sChain']['schainName']
- set_rotation_for_schain(schain_name, 100)
+ fts = 100
+ url = 'http://127.0.0.1:10003'
+ set_rotation_for_schain(url=url, timestamp=fts)
args, kwargs = post.call_args
data = json.loads(kwargs['data'])
- params = {
- 'finishTime': 100
- }
- assert kwargs['url'] == 'http://127.0.0.1:10003'
+ params = {'finishTime': fts}
+ assert kwargs['url'] == url
assert data['method'] == 'setSchainExitTime'
assert data['params'] == params
@@ -44,6 +43,7 @@ def test_is_exited(dutils):
dutils.get_info = get_info
-def test_get_leaving_schains_for_node(skale, node_config): # TODO: improve test
+# TODO: improve test
+def test_get_leaving_schains_for_node(skale, node_config):
leaving_schains = get_leaving_schains_for_node(skale, node_config.id)
assert isinstance(leaving_schains, list)
diff --git a/tests/schains/schain_eth_state_test.py b/tests/schains/schain_eth_state_test.py
new file mode 100644
index 000000000..796e5054b
--- /dev/null
+++ b/tests/schains/schain_eth_state_test.py
@@ -0,0 +1,17 @@
+from core.schains.external_config import ExternalConfig, ExternalState
+from tests.utils import ALLOWED_RANGES
+
+
+def test_schain_mainnet_state(schain_db, secret_key):
+ name = schain_db
+ econfig = ExternalConfig(name=name)
+ assert econfig.ranges == []
+ assert econfig.ima_linked
+ assert econfig.chain_id is None
+
+ estate = ExternalState(ima_linked=False, chain_id=4, ranges=ALLOWED_RANGES)
+
+ econfig.update(estate)
+ assert econfig.ranges == ALLOWED_RANGES
+ assert not econfig.ima_linked
+ assert econfig.chain_id == 4
diff --git a/tests/schains/task_test.py b/tests/schains/task_test.py
new file mode 100644
index 000000000..f5c574094
--- /dev/null
+++ b/tests/schains/task_test.py
@@ -0,0 +1,33 @@
+import functools
+import time
+
+import pytest
+
+from core.schains.task import run_tasks, Task
+
+ITERATIONS = 10
+SCHAINS_NUM = 10
+
+
+class StopActionError(Exception):
+ pass
+
+
+def action(name):
+ for i in range(ITERATIONS):
+ time.sleep(2)
+ raise StopActionError(f'Stopping {name}')
+
+
+@pytest.mark.skip
+def test_tasks():
+ tasks = [
+ Task(
+ f'test-schain-{i}',
+ functools.partial(action, name=f'test-schain-{i}'),
+ i
+ )
+ for i in range(SCHAINS_NUM)
+ ]
+ run_tasks(tasks=tasks)
+ time.sleep(3)
diff --git a/tests/skale-data/config/containers.json b/tests/skale-data/config/containers.json
index 02b4a3eb1..3561d2539 100644
--- a/tests/skale-data/config/containers.json
+++ b/tests/skale-data/config/containers.json
@@ -1,7 +1,7 @@
{
"schain": {
"name": "skalenetwork/schain",
- "version": "3.15.13-develop.13",
+ "version": "3.16.1",
"custom_args": {
"ulimits_list": [
{
@@ -31,8 +31,8 @@
},
"ima": {
"name": "skalenetwork/ima",
- "version": "1.3.4-beta.5",
- "new_version": "2.0.0-develop.3",
+ "version": "2.0.0-develop.3",
+ "new_version": "2.0.0-beta.9",
"custom_args": {},
"args": {
"restart_policy": {
diff --git a/tests/test_nginx.py b/tests/test_nginx.py
index 328804269..08de8d695 100644
--- a/tests/test_nginx.py
+++ b/tests/test_nginx.py
@@ -67,7 +67,7 @@ def tmp_dir():
try:
yield path
finally:
- shutil.rmtree(path)
+ shutil.rmtree(path, ignore_errors=True)
@pytest.fixture
@@ -91,7 +91,7 @@ def ssl_dir():
try:
yield path
finally:
- shutil.rmtree(path)
+ shutil.rmtree(path, ignore_errors=True)
@pytest.fixture
diff --git a/tests/utils.py b/tests/utils.py
index f424c809a..22fad01fa 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -18,9 +18,8 @@
remove_schain_container,
remove_schain_volume
)
-from core.schains.config.main import save_schain_config
-from core.schains.config.helper import get_schain_config
-from core.schains.firewall.types import IHostFirewallController
+from core.schains.config.file_manager import ConfigFileManager
+from core.schains.firewall.types import IHostFirewallController, IpRange
from core.schains.firewall import SChainFirewallManager, SChainRuleController
from core.schains.runner import (
get_image_name,
@@ -48,6 +47,12 @@
CONFIG_STREAM = "1.0.0-testnet"
+ALLOWED_RANGES = [
+ IpRange('1.1.1.1', '2.2.2.2'),
+ IpRange('3.3.3.3', '4.4.4.4')
+]
+
+
class FailedAPICall(Exception):
pass
@@ -175,11 +180,12 @@ def alter_schain_config(schain_name: str, public_key: str) -> None:
"""
Fix config to make skaled work with a single node (mine blocks, etc)
"""
- config = get_schain_config(schain_name)
+ cfm = ConfigFileManager(schain_name)
+ config = cfm.skaled_config
node = config['skaleConfig']['sChain']['nodes'][0]
node['publicKey'] = public_key
config['skaleConfig']['sChain']['nodes'] = [node]
- save_schain_config(config, schain_name)
+ cfm.save_skaled_config(config)
class HostTestFirewallController(IHostFirewallController):
diff --git a/tools/configs/__init__.py b/tools/configs/__init__.py
index 341d1e2fe..da5c6c63c 100644
--- a/tools/configs/__init__.py
+++ b/tools/configs/__init__.py
@@ -86,3 +86,5 @@
CHECK_REPORT_PATH = os.path.join(SKALE_VOLUME_PATH, 'reports', 'checks.json')
NODE_OPTIONS_FILEPATH = os.path.join(NODE_DATA_PATH, 'node_options.json')
+
+PULL_CONFIG_FOR_SCHAIN = os.getenv('PULL_CONFIG_FOR_SCHAIN')
diff --git a/tools/configs/containers.py b/tools/configs/containers.py
index d24be6ba4..f4052a3ed 100644
--- a/tools/configs/containers.py
+++ b/tools/configs/containers.py
@@ -43,8 +43,6 @@
CREATED_STATUS = 'created'
RUNNING_STATUS = 'running'
-LOCAL_IP = '127.0.0.1'
-
DOCKER_DEFAULT_HEAD_LINES = 400
DOCKER_DEFAULT_TAIL_LINES = 10000
diff --git a/tools/configs/logs.py b/tools/configs/logs.py
index 35376a400..6ccb58043 100644
--- a/tools/configs/logs.py
+++ b/tools/configs/logs.py
@@ -38,10 +38,10 @@
REMOVED_CONTAINERS_FOLDER_NAME
)
-LOG_FILE_SIZE_MB = 100
+LOG_FILE_SIZE_MB = 40
LOG_FILE_SIZE_BYTES = LOG_FILE_SIZE_MB * 1000000
-LOG_BACKUP_COUNT = 3
+LOG_BACKUP_COUNT = 10
-ADMIN_LOG_FORMAT = '[%(asctime)s %(levelname)s] - %(process)d - %(threadName)s - %(name)s:%(lineno)d - %(message)s' # noqa
+ADMIN_LOG_FORMAT = '[%(asctime)s %(levelname)s][%(process)d][%(processName)s][%(threadName)s] - %(name)s:%(lineno)d - %(message)s' # noqa
API_LOG_FORMAT = '[%(asctime)s] %(process)d %(levelname)s %(url)s %(module)s: %(message)s' # noqa
diff --git a/tools/docker_utils.py b/tools/docker_utils.py
index ccc252562..301fa87f9 100644
--- a/tools/docker_utils.py
+++ b/tools/docker_utils.py
@@ -17,15 +17,16 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import os
import io
import itertools
import logging
import multiprocessing
+import os
import re
import time
+from datetime import datetime
from functools import wraps
-from typing import Optional
+from typing import Dict, Optional
import docker
from docker import APIClient
@@ -45,6 +46,7 @@
CONTAINER_LOGS_SEPARATOR
)
from tools.configs.logs import REMOVED_CONTAINERS_FOLDER_PATH
+from tools.helper import run_cmd
logger = logging.getLogger(__name__)
@@ -91,7 +93,7 @@ def init_docker_client(
self,
host: str = DEFAULT_DOCKER_HOST
) -> DockerClient:
- logger.info(f'Initing docker client with host {host}')
+ logger.debug('Initing docker client with host %s', host)
return docker.DockerClient(base_url=host)
def init_docker_cli(
@@ -156,7 +158,7 @@ def get_info(self, container_id: str) -> dict:
container_info['stats'] = self.cli.inspect_container(container.id)
container_info['status'] = container.status
except docker.errors.NotFound:
- logger.warning(
+ logger.debug(
f'Can not get info - no such container: {container_id}')
container_info['status'] = CONTAINER_NOT_FOUND
return container_info
@@ -188,7 +190,7 @@ def get_vol(self, name: str) -> Volume:
try:
return self.client.volumes.get(name)
except docker.errors.NotFound:
- logger.warning(f'Volume {name} is not exist')
+ logger.debug(f'Volume {name} does not exist')
return None
def rm_vol(self, name: str, retry_lvmpy_error: bool = True) -> None:
@@ -223,8 +225,8 @@ def safe_get_container(self, container_name: str):
try:
return self.client.containers.get(container_name)
except docker.errors.APIError as e:
- logger.warning(e)
- logger.warning(f'No such container: {container_name}')
+ logger.debug(e)
+ logger.debug(f'No such container: {container_name}')
def safe_rm(self, container_name: str, timeout=DOCKER_DEFAULT_STOP_TIMEOUT, **kwargs):
"""
@@ -333,6 +335,28 @@ def restart(
except docker.errors.APIError:
logger.error(f'No such container: {container_name}')
+ def get_cmd(self, container_id: str) -> Dict:
+ info = self.get_info(container_id)
+ if info:
+ return info['stats']['Config']['Cmd']
+ return {}
+
+ def get_container_created_ts(self, container_id: str) -> int:
+ info = self.get_info(container_id)
+ if info:
+ iso_time = info['stats']['Created'].split('.')[0]
+ return int(datetime.fromisoformat(iso_time).timestamp())
+ else:
+ return 0
+
+ def get_vol_created_ts(self, name: str) -> int:
+ vol = self.get_vol(name)
+ if vol:
+ iso_time = vol.attrs['CreatedAt'][:-1]
+ return int(datetime.fromisoformat(iso_time).timestamp())
+ else:
+ return 0
+
def restart_all_schains(
self,
timeout: int = DOCKER_DEFAULT_STOP_TIMEOUT
@@ -343,8 +367,8 @@ def restart_all_schains(
def pull(self, name: str) -> None:
with DockerUtils.docker_lock:
- repo, tag = name.split(':')
- self.client.images.pull(repository=repo, tag=tag)
+ # repo, tag = name.split(':')
+ run_cmd(['docker', 'pull', name])
def pulled(self, name: str) -> bool:
with DockerUtils.docker_lock:
diff --git a/tools/helper.py b/tools/helper.py
index 8a67b54bf..c19538c59 100644
--- a/tools/helper.py
+++ b/tools/helper.py
@@ -160,7 +160,7 @@ def get_endpoint_call_speed(web3):
scores = []
for _ in range(10):
start = time.time()
- result = web3.eth.gasPrice
+ result = web3.eth.gas_price
if result:
scores.append(time.time() - start)
if len(scores) == 0:
diff --git a/tools/notifications/messages.py b/tools/notifications/messages.py
index 584567049..907e84fc7 100644
--- a/tools/notifications/messages.py
+++ b/tools/notifications/messages.py
@@ -53,7 +53,7 @@ def wrapper(*args, **kwargs):
return func(*args, **kwargs)
except Exception:
logger.exception(
- f'Notification {func.__name__} sending failed')
+ 'Notification %s sending failed', func.__name__)
return wrapper
diff --git a/tools/wallet_utils.py b/tools/wallet_utils.py
index d5a0eb2a2..3faca9e6a 100644
--- a/tools/wallet_utils.py
+++ b/tools/wallet_utils.py
@@ -21,7 +21,6 @@
from redis import Redis
-
from skale.utils.web3_utils import init_web3
from skale.wallets import BaseWallet, RedisWalletAdapter, SgxWallet
from skale.wallets.web3_wallet import to_checksum_address
@@ -43,12 +42,12 @@
def wallet_with_balance(skale): # todo: move to the skale.py
address = skale.wallet.address
- eth_balance_wei = skale.web3.eth.getBalance(address)
+ eth_balance_wei = skale.web3.eth.get_balance(address)
return {
'address': to_checksum_address(address),
'eth_balance_wei': eth_balance_wei,
'skale_balance_wei': 0,
- 'eth_balance': str(skale.web3.fromWei(eth_balance_wei, 'ether')),
+ 'eth_balance': str(skale.web3.from_wei(eth_balance_wei, 'ether')),
'skale_balance': '0'
}
diff --git a/web/migrations.py b/web/migrations.py
index 7a01f8406..2ca167cd3 100644
--- a/web/migrations.py
+++ b/web/migrations.py
@@ -58,6 +58,10 @@ def run_migrations(db, migrator):
# 2.3 -> 2.4 update fields
add_failed_snapshot_from(db, migrator)
+ # 2.4 -> 2.5 update fields
+ add_backup_run_field(db, migrator)
+ add_sync_config_run_field(db, migrator)
+
def add_new_schain_field(db, migrator):
add_column(
@@ -122,6 +126,20 @@ def add_failed_snapshot_from(db, migrator):
)
+def add_backup_run_field(db, migrator):
+ add_column(
+ db, migrator, 'SChainRecord', 'backup_run',
+ BooleanField(default=False)
+ )
+
+
+def add_sync_config_run_field(db, migrator):
+ add_column(
+ db, migrator, 'SChainRecord', 'sync_config_run',
+ BooleanField(default=False)
+ )
+
+
def find_column(db, table_name, column_name):
columns = db.get_columns(table_name)
return next((x for x in columns if x.name == column_name), None)
diff --git a/web/models/schain.py b/web/models/schain.py
index 8bdff1069..94b0426c2 100644
--- a/web/models/schain.py
+++ b/web/models/schain.py
@@ -18,7 +18,9 @@
# along with this program. If not, see .
import logging
+import threading
from datetime import datetime
+
from peewee import (CharField, DateTimeField,
IntegrityError, IntegerField, BooleanField)
@@ -31,6 +33,7 @@
class SChainRecord(BaseModel):
+ _lock = threading.Lock()
name = CharField(unique=True)
added_at = DateTimeField()
dkg_status = IntegerField()
@@ -39,7 +42,8 @@ class SChainRecord(BaseModel):
new_schain = BooleanField(default=True)
repair_mode = BooleanField(default=False)
needs_reload = BooleanField(default=False)
-
+ backup_run = BooleanField(default=False)
+ sync_config_run = BooleanField(default=False)
monitor_last_seen = DateTimeField()
monitor_id = IntegerField(default=0)
@@ -94,6 +98,10 @@ def to_dict(cls, record):
'config_version': record.config_version
}
+ def upload(self, *args, **kwargs) -> None:
+ with SChainRecord._lock:
+ self.save(*args, **kwargs)
+
def dkg_started(self):
self.set_dkg_status(DKGStatus.IN_PROGRESS)
@@ -109,63 +117,68 @@ def dkg_done(self):
def set_dkg_status(self, val: DKGStatus) -> None:
logger.info(f'Changing DKG status for {self.name} to {val.name}')
self.dkg_status = val.value
- self.save()
+ self.upload()
def set_deleted(self):
self.is_deleted = True
- self.save()
+ self.upload()
def set_first_run(self, val):
logger.info(f'Changing first_run for {self.name} to {val}')
self.first_run = val
- self.save(only=[SChainRecord.first_run])
+ self.upload(only=[SChainRecord.first_run])
+
+ def set_backup_run(self, val):
+ logger.info(f'Changing backup_run for {self.name} to {val}')
+ self.backup_run = val
+ self.upload(only=[SChainRecord.backup_run])
def set_repair_mode(self, value):
logger.info(f'Changing repair_mode for {self.name} to {value}')
self.repair_mode = value
- self.save()
+ self.upload()
def set_new_schain(self, value):
logger.info(f'Changing new_schain for {self.name} to {value}')
self.new_schain = value
- self.save()
+ self.upload()
def set_needs_reload(self, value):
logger.info(f'Changing needs_reload for {self.name} to {value}')
self.needs_reload = value
- self.save()
+ self.upload()
def set_monitor_last_seen(self, value):
logger.info(f'Changing monitor_last_seen for {self.name} to {value}')
self.monitor_last_seen = value
- self.save()
+ self.upload()
def set_monitor_id(self, value):
logger.info(f'Changing monitor_id for {self.name} to {value}')
self.monitor_id = value
- self.save()
+ self.upload()
def set_config_version(self, value):
logger.info(f'Changing config_version for {self.name} to {value}')
self.config_version = value
- self.save()
+ self.upload()
def set_restart_count(self, value: int) -> None:
logger.info(f'Changing restart count for {self.name} to {value}')
self.restart_count = value
- self.save()
+ self.upload()
def set_failed_rpc_count(self, value: int) -> None:
logger.info(f'Changing failed rpc count for {self.name} to {value}')
self.failed_rpc_count = value
- self.save()
+ self.upload()
def set_snapshot_from(self, value: str) -> None:
logger.info(f'Changing snapshot from for {self.name} to {value}')
self.snapshot_from = value
- self.save()
+ self.upload()
- def reset_failed_conunters(self) -> None:
+ def reset_failed_counters(self) -> None:
logger.info(f'Resetting failed counters for {self.name}')
self.set_restart_count(0)
self.set_failed_rpc_count(0)
@@ -173,6 +186,11 @@ def reset_failed_conunters(self) -> None:
def is_dkg_done(self) -> bool:
return self.dkg_status == DKGStatus.DONE.value
+ def set_sync_config_run(self, value):
+ logger.info(f'Changing sync_config_run for {self.name} to {value}')
+ self.sync_config_run = value
+ self.upload()
+
def is_dkg_unsuccessful(self) -> bool:
return self.dkg_status in [
DKGStatus.KEY_GENERATION_ERROR.value,
@@ -193,6 +211,24 @@ def set_schains_first_run():
query.execute()
+def set_schains_backup_run():
+ logger.info('Setting backup_run=True for all sChain records')
+ query = SChainRecord.update(backup_run=True).where(
+ SChainRecord.backup_run == False) # noqa
+ query.execute()
+
+
+def set_schains_sync_config_run(chain: str):
+ logger.info('Setting sync_config_run=True for sChain: %s', chain)
+ if chain == 'all':
+ query = SChainRecord.update(sync_config_run=True).where(
+ SChainRecord.sync_config_run == False) # noqa
+ else:
+ query = SChainRecord.update(sync_config_run=True).where(
+ (SChainRecord.sync_config_run == False) & (SChainRecord.name == chain)) # noqa
+ query.execute()
+
+
def set_schains_need_reload():
logger.info('Setting needs_reload=True for all sChain records')
query = SChainRecord.update(needs_reload=True).where(
@@ -233,6 +269,18 @@ def set_first_run(name, value):
schain_record.set_first_run(value)
+def set_backup_run(name, value):
+ if SChainRecord.added(name):
+ schain_record = SChainRecord.get_by_name(name)
+ schain_record.set_backup_run(value)
+
+
+def set_sync_config_run(name, value):
+ if SChainRecord.added(name):
+ schain_record = SChainRecord.get_by_name(name)
+ schain_record.set_sync_config_run(value)
+
+
def get_schains_names(include_deleted=False):
return [r.name for r in SChainRecord.get_all_records(include_deleted)]
diff --git a/web/routes/health.py b/web/routes/health.py
index 54ae1f894..d4306659c 100644
--- a/web/routes/health.py
+++ b/web/routes/health.py
@@ -28,13 +28,14 @@
from urllib.parse import urlparse
-from core.node import get_check_report
+from core.node import get_check_report, get_skale_node_version
from core.schains.checks import SChainChecks
from core.schains.firewall.utils import (
get_default_rule_controller,
get_sync_agent_ranges
)
from core.schains.ima import get_ima_log_checks
+from core.schains.external_config import ExternalState
from tools.sgx_utils import SGX_CERTIFICATES_FOLDER, SGX_SERVER_URL
from tools.configs import ZMQ_PORT, ZMQ_TIMEOUT
from web.models.schain import SChainRecord
@@ -84,6 +85,12 @@ def schains_checks():
schains = g.skale.schains.get_schains_for_node(node_id)
sync_agent_ranges = get_sync_agent_ranges(g.skale)
+ stream_version = get_skale_node_version()
+ estate = ExternalState(
+ chain_id=g.skale.web3.eth.chain_id,
+ ima_linked=True,
+ ranges=[]
+ )
checks = []
for schain in schains:
if schain.get('name') != '':
@@ -100,8 +107,10 @@ def schains_checks():
node_id,
schain_record=schain_record,
rule_controller=rc,
- rotation_id=rotation_id
- ).get_all(checks_filter=checks_filter)
+ rotation_id=rotation_id,
+ stream_version=stream_version,
+ estate=estate
+ ).get_all(needed=checks_filter)
checks.append({
'name': schain['name'],
'healthchecks': schain_checks
diff --git a/web/routes/node.py b/web/routes/node.py
index feea1ca77..4a2f6dc16 100644
--- a/web/routes/node.py
+++ b/web/routes/node.py
@@ -187,10 +187,10 @@ def hardware():
def endpoint_info():
logger.debug(request)
call_speed = get_endpoint_call_speed(g.web3)
- block_number = g.web3.eth.blockNumber
+ block_number = g.web3.eth.block_number
trusted = not any([untrusted in ENDPOINT for untrusted in UNTRUSTED_PROVIDERS])
try:
- eth_client_version = g.web3.clientVersion
+ eth_client_version = g.web3.client_version
except Exception:
logger.exception('Cannot get client version')
eth_client_version = 'unknown'
diff --git a/web/routes/schains.py b/web/routes/schains.py
index 8fd241661..2548a4d33 100644
--- a/web/routes/schains.py
+++ b/web/routes/schains.py
@@ -21,12 +21,11 @@
from flask import Blueprint, g, request
-from core.schains.config.directory import schain_config_exists
+from core.schains.config.file_manager import ConfigFileManager
from core.schains.config.helper import (
get_base_port_from_config,
get_node_ips_from_config,
- get_own_ip_from_config,
- get_schain_config
+ get_own_ip_from_config
)
from core.schains.firewall.utils import (
get_default_rule_controller,
@@ -74,13 +73,13 @@ def schain_config():
schain_name = request.args.get(key)
if not schain_name:
return construct_key_error_response([key])
- schain_config = get_schain_config(schain_name)
- if schain_config is None:
+ config = ConfigFileManager(schain_name).skaled_config
+ if config is None:
return construct_err_response(
msg=f'sChain config not found: {schain_name}'
)
- skale_schain_config = schain_config['skaleConfig']
- return construct_ok_response(skale_schain_config)
+ skale_config = config['skaleConfig']
+ return construct_ok_response(skale_config)
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'list'), methods=['GET'])
@@ -112,11 +111,12 @@ def firewall_rules():
logger.debug(request)
schain_name = request.args.get('schain_name')
sync_agent_ranges = get_sync_agent_ranges(g.skale)
- if not schain_config_exists(schain_name):
+ cfm = ConfigFileManager(schain_name)
+ if not cfm.skaled_config_exists:
return construct_err_response(
msg=f'No schain with name {schain_name}'
)
- conf = get_schain_config(schain_name)
+ conf = cfm.skaled_config
base_port = get_base_port_from_config(conf)
node_ips = get_node_ips_from_config(conf)
own_ip = get_own_ip_from_config(conf)
@@ -137,7 +137,8 @@ def repair():
logger.debug(request)
schain_name = request.json.get('schain_name')
snapshot_from = request.json.get('snapshot_from', '')
- result = toggle_schain_repair_mode(schain_name, snapshot_from=snapshot_from)
+ result = toggle_schain_repair_mode(
+ schain_name, snapshot_from=snapshot_from)
if result:
return construct_ok_response()
else: