diff --git a/.flake8 b/.flake8
index a7a1a9dd9..a43fb2bad 100644
--- a/.flake8
+++ b/.flake8
@@ -1,3 +1,3 @@
[flake8]
max-line-length = 100
-exclude = .git,__pycache__,docs/source/conf.py,old,build,dist,venv,node_modules,helper-scripts
+exclude = .git,__pycache__,docs/source/conf.py,old,build,dist,venv,.venv,node_modules,helper-scripts
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index f5cc084e9..59296eb3d 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,5 +1,5 @@
name: Test
-on: [push, pull_request]
+on: [push]
env:
ETH_PRIVATE_KEY: ${{ secrets.ETH_PRIVATE_KEY }}
SCHAIN_TYPE: ${{ secrets.SCHAIN_TYPE }}
diff --git a/VERSION b/VERSION
index 37c2961c2..834f26295 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.7.2
+2.8.0
diff --git a/core/node.py b/core/node.py
index 72df5bf00..86c451e48 100644
--- a/core/node.py
+++ b/core/node.py
@@ -194,7 +194,7 @@ def get_exit_status(self):
self.config.id)
schain_statuses = [
{
- 'name': schain['name'],
+ 'name': schain.name,
'status': SchainExitStatus.ACTIVE.name
}
for schain in active_schains
@@ -207,7 +207,7 @@ def get_exit_status(self):
status = SchainExitStatus.LEFT
else:
status = SchainExitStatus.LEAVING
- schain_name = self.skale.schains.get(schain['schain_id'])['name']
+ schain_name = self.skale.schains.get(schain['schain_id']).name
if not schain_name:
schain_name = '[REMOVED]'
schain_statuses.append(
diff --git a/core/schains/checks.py b/core/schains/checks.py
index 8f4b0d9ee..0202ed22d 100644
--- a/core/schains/checks.py
+++ b/core/schains/checks.py
@@ -41,7 +41,7 @@
from core.schains.dkg.utils import get_secret_key_share_filepath
from core.schains.firewall.types import IRuleController
from core.schains.ima import get_ima_time_frame, get_migration_ts as get_ima_migration_ts
-from core.schains.process_manager_helper import is_monitor_process_alive
+from core.schains.process import is_monitor_process_alive
from core.schains.rpc import (
check_endpoint_alive,
check_endpoint_blocks,
diff --git a/core/schains/cleaner.py b/core/schains/cleaner.py
index 33556f153..f006d439a 100644
--- a/core/schains/cleaner.py
+++ b/core/schains/cleaner.py
@@ -37,7 +37,7 @@
get_node_ips_from_config,
get_own_ip_from_config,
)
-from core.schains.process_manager_helper import terminate_schain_process
+from core.schains.process import ProcessReport, terminate_process
from core.schains.runner import get_container_name, is_exited
from core.schains.external_config import ExternalConfig
from core.schains.types import ContainerType
@@ -109,18 +109,21 @@ def monitor(skale, node_config, dutils=None):
for schain_name in schains_on_node:
if schain_name not in schain_names_on_contracts:
- logger.warning(f'sChain {schain_name} was found on node, but not on contracts: \
-{schain_names_on_contracts}, going to remove it!')
+ logger.warning(
+ '%s was found on node, but not on contracts: %s, trying to cleanup',
+ schain_name,
+ schain_names_on_contracts,
+ )
try:
ensure_schain_removed(skale, schain_name, node_config.id, dutils=dutils)
except Exception:
- logger.exception(f'sChain removal {schain_name} failed')
+ logger.exception('%s removal failed', schain_name)
logger.info('Cleanup procedure finished')
def get_schain_names_from_contract(skale, node_id):
schains_on_contract = skale.schains.get_schains_for_node(node_id)
- return list(map(lambda schain: schain['name'], schains_on_contract))
+ return list(map(lambda schain: schain.name, schains_on_contract))
def get_schains_with_containers(dutils=None):
@@ -185,9 +188,10 @@ def remove_schain(
msg: str,
dutils: Optional[DockerUtils] = None,
) -> None:
- schain_record = upsert_schain_record(schain_name)
logger.warning(msg)
- terminate_schain_process(schain_record)
+ report = ProcessReport(name=schain_name)
+ if report.is_exist():
+ terminate_process(report.pid)
delete_bls_keys(skale, schain_name)
sync_agent_ranges = get_sync_agent_ranges(skale)
@@ -238,14 +242,14 @@ def cleanup_schain(
dutils=dutils,
sync_node=SYNC_NODE,
)
- status = checks.get_all()
- if status['skaled_container'] or is_exited(
+ check_status = checks.get_all()
+ if check_status['skaled_container'] or is_exited(
schain_name, container_type=ContainerType.schain, dutils=dutils
):
remove_schain_container(schain_name, dutils=dutils)
- if status['volume']:
+ if check_status['volume']:
remove_schain_volume(schain_name, dutils=dutils)
- if status['firewall_rules']:
+ if check_status['firewall_rules']:
conf = ConfigFileManager(schain_name).skaled_config
base_port = get_base_port_from_config(conf)
own_ip = get_own_ip_from_config(conf)
@@ -256,11 +260,11 @@ def cleanup_schain(
rc.configure(base_port=base_port, own_ip=own_ip, node_ips=node_ips, sync_ip_ranges=ranges)
rc.cleanup()
if estate is not None and estate.ima_linked:
- if status.get('ima_container', False) or is_exited(
+ if check_status.get('ima_container', False) or is_exited(
schain_name, container_type=ContainerType.ima, dutils=dutils
):
remove_ima_container(schain_name, dutils=dutils)
- if status['config_dir']:
+ if check_status['config_dir']:
remove_config_dir(schain_name)
mark_schain_deleted(schain_name)
diff --git a/core/schains/cmd.py b/core/schains/cmd.py
index c96e2432e..3df08f628 100644
--- a/core/schains/cmd.py
+++ b/core/schains/cmd.py
@@ -17,6 +17,8 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+from typing import Optional
+
from core.schains.config.file_manager import ConfigFileManager
from core.schains.config.helper import get_schain_ports_from_config
from core.schains.config.main import get_skaled_container_config_path
@@ -34,7 +36,7 @@ def get_schain_container_cmd(
download_snapshot: bool = False,
enable_ssl: bool = True,
sync_node: bool = False,
- snapshot_from: str = ''
+ snapshot_from: Optional[str] = None
) -> str:
"""Returns parameters that will be passed to skaled binary in the sChain container"""
opts = get_schain_container_base_opts(schain_name, enable_ssl=enable_ssl, sync_node=sync_node)
diff --git a/core/schains/config/directory.py b/core/schains/config/directory.py
index 8d2c7a66d..ce11c8196 100644
--- a/core/schains/config/directory.py
+++ b/core/schains/config/directory.py
@@ -25,10 +25,11 @@
from tools.configs.schains import (
BASE_SCHAIN_CONFIG_FILEPATH,
+ NODE_CLI_STATUS_FILENAME,
SCHAINS_DIR_PATH,
SCHAINS_DIR_PATH_HOST,
SCHAIN_SCHECKS_FILENAME,
- SKALED_STATUS_FILENAME
+ SKALED_STATUS_FILENAME,
)
@@ -58,6 +59,10 @@ def skaled_status_filepath(name: str) -> str:
return os.path.join(schain_config_dir(name), SKALED_STATUS_FILENAME)
+def node_cli_status_filepath(name: str) -> str:
+ return os.path.join(schain_config_dir(name), NODE_CLI_STATUS_FILENAME)
+
+
def get_schain_check_filepath(schain_name):
schain_dir_path = schain_config_dir(schain_name)
return os.path.join(schain_dir_path, SCHAIN_SCHECKS_FILENAME)
diff --git a/core/schains/config/generator.py b/core/schains/config/generator.py
index fe4eef631..2af161dc5 100644
--- a/core/schains/config/generator.py
+++ b/core/schains/config/generator.py
@@ -21,6 +21,7 @@
from dataclasses import dataclass
from skale import Skale
+from skale.contracts.manager.schains import SchainStructure
from skale.schain_config.generator import get_schain_nodes_with_schains
from skale.schain_config.ports_allocation import get_schain_base_port_on_node
from skale.schain_config.rotation_history import get_previous_schain_groups
@@ -34,7 +35,7 @@
from core.schains.config.predeployed import generate_predeployed_accounts
from core.schains.config.precompiled import generate_precompiled_accounts
from core.schains.config.generation import Gen
-from core.schains.config.static_accounts import is_static_accounts, static_accounts
+from core.schains.config.legacy_data import is_static_accounts, static_accounts, static_groups
from core.schains.config.helper import get_chain_id, get_schain_id
from core.schains.dkg.utils import get_common_bls_public_key
from core.schains.limits import get_schain_type
@@ -88,26 +89,26 @@ def to_dict(self):
}
-def get_on_chain_owner(schain: dict, generation: int, is_owner_contract: bool) -> str:
+def get_on_chain_owner(schain: SchainStructure, generation: int, is_owner_contract: bool) -> str:
"""
Returns on-chain owner depending on sChain generation.
"""
if not is_owner_contract:
- return schain['mainnetOwner']
+ return schain.mainnet_owner
if generation >= Gen.ONE:
return MARIONETTE_ADDRESS
if generation == Gen.ZERO:
- return schain['mainnetOwner']
+ return schain.mainnet_owner
-def get_on_chain_etherbase(schain: dict, generation: int) -> str:
+def get_on_chain_etherbase(schain: SchainStructure, generation: int) -> str:
"""
Returns on-chain owner depending on sChain generation.
"""
if generation >= Gen.ONE:
return ETHERBASE_ADDRESS
if generation == Gen.ZERO:
- return schain['mainnetOwner']
+ return schain.mainnet_owner
def get_schain_id_for_chain(schain_name: str, generation: int) -> int:
@@ -120,17 +121,17 @@ def get_schain_id_for_chain(schain_name: str, generation: int) -> int:
return 1
-def get_schain_originator(schain: dict):
+def get_schain_originator(schain: SchainStructure) -> str:
"""
Returns address that will be used as an sChain originator
"""
- if is_zero_address(schain['originator']):
- return schain['mainnetOwner']
- return schain['originator']
+ if is_zero_address(schain.originator):
+ return schain.mainnet_owner
+ return schain.originator
def generate_schain_config(
- schain: dict, node_id: int, node: dict, ecdsa_key_name: str,
+ schain: SchainStructure, node_id: int, node: dict, ecdsa_key_name: str,
rotation_id: int, schain_nodes_with_schains: list,
node_groups: list, generation: int, is_owner_contract: bool,
skale_manager_opts: SkaleManagerOpts, schain_base_port: int, common_bls_public_keys: list[str],
@@ -139,7 +140,7 @@ def generate_schain_config(
) -> SChainConfig:
"""Main function that is used to generate sChain config"""
logger.info(
- f'Going to generate sChain config for {schain["name"]}, '
+ f'Going to generate sChain config for {schain.name}, '
f'node_name: {node["name"]}, node_id: {node_id}, rotation_id: {rotation_id}'
)
if sync_node:
@@ -149,17 +150,23 @@ def generate_schain_config(
on_chain_etherbase = get_on_chain_etherbase(schain, generation)
on_chain_owner = get_on_chain_owner(schain, generation, is_owner_contract)
- mainnet_owner = schain['mainnetOwner']
- schain_type = get_schain_type(schain['partOfNode'])
+ mainnet_owner = schain.mainnet_owner
+ schain_type = get_schain_type(schain.part_of_node)
- schain_id = get_schain_id_for_chain(schain['name'], generation)
+ schain_id = get_schain_id_for_chain(schain.name, generation)
base_config = SChainBaseConfig(BASE_SCHAIN_CONFIG_FILEPATH)
dynamic_params = {
- 'chainID': get_chain_id(schain['name'])
+ 'chainID': get_chain_id(schain.name)
}
+ legacy_groups = static_groups(schain.name)
+ logger.debug('Legacy node groups: %s', legacy_groups)
+ logger.debug('Vanilla node groups: %s', node_groups)
+ node_groups.update(legacy_groups)
+ logger.debug('Modified node groups: %s', node_groups)
+
originator_address = get_schain_originator(schain)
skale_config = generate_skale_section(
@@ -182,13 +189,14 @@ def generate_schain_config(
)
accounts = {}
- if is_static_accounts(schain['name']):
- logger.info(f'Found static account for {schain["name"]}, going to use in config')
- accounts = static_accounts(schain['name'])['accounts']
+ if is_static_accounts(schain.name):
+ logger.info(f'Found static account for {schain.name}, going to use in config')
+ accounts = static_accounts(schain.name)['accounts']
else:
logger.info('Static accounts not found, generating regular accounts section')
predeployed_accounts = generate_predeployed_accounts(
- schain_name=schain['name'],
+ schain_name=schain.name,
+ allocation_type=schain.options.allocation_type,
schain_type=schain_type,
schain_nodes=schain_nodes_with_schains,
on_chain_owner=on_chain_owner,
@@ -235,7 +243,7 @@ def generate_schain_config_with_skale(
node = skale.nodes.get(node_config.id)
node_groups = get_previous_schain_groups(skale, schain_name)
- is_owner_contract = is_address_contract(skale.web3, schain['mainnetOwner'])
+ is_owner_contract = is_address_contract(skale.web3, schain.mainnet_owner)
skale_manager_opts = init_skale_manager_opts(skale)
group_index = skale.schains.name_to_id(schain_name)
@@ -246,7 +254,7 @@ def generate_schain_config_with_skale(
else:
schain_base_port = get_schain_base_port_on_node(
schains_on_node,
- schain['name'],
+ schain.name,
node['port']
)
diff --git a/core/schains/config/static_accounts.py b/core/schains/config/legacy_data.py
similarity index 55%
rename from core/schains/config/static_accounts.py
rename to core/schains/config/legacy_data.py
index 989da99f8..000d2455d 100644
--- a/core/schains/config/static_accounts.py
+++ b/core/schains/config/legacy_data.py
@@ -19,8 +19,10 @@
import os
+from skale.schain_config.rotation_history import RotationNodeData
+
from tools.helper import read_json
-from tools.configs import STATIC_ACCOUNTS_FOLDER, ENV_TYPE
+from tools.configs import STATIC_ACCOUNTS_FOLDER, STATIC_GROUPS_FOLDER, ENV_TYPE
def static_accounts(schain_name: str) -> dict:
@@ -36,3 +38,25 @@ def static_accounts_filepath(schain_name: str) -> str:
if not os.path.isdir(static_accounts_env_path):
return ''
return os.path.join(static_accounts_env_path, f'schain-{schain_name}.json')
+
+
+def static_groups(schain_name: str) -> dict:
+ static_groups_env_path = static_groups_filepath(schain_name)
+ if not os.path.isfile(static_groups_env_path):
+ return {}
+ groups = read_json(static_groups_env_path)
+ prepared_groups = {}
+ for plain_rotation_id, data in groups.items():
+ rotation_id = int(plain_rotation_id)
+ prepared_groups[rotation_id] = data
+ prepared_nodes = prepared_groups[rotation_id]['nodes']
+ node_ids_string = list(data['nodes'].keys())
+ for node_id_string in node_ids_string:
+ node_info = prepared_nodes.pop(node_id_string)
+ prepared_nodes[int(node_id_string)] = RotationNodeData(*node_info)
+ return prepared_groups
+
+
+def static_groups_filepath(schain_name: str) -> str:
+ static_groups_env_path = os.path.join(STATIC_GROUPS_FOLDER, ENV_TYPE)
+ return os.path.join(static_groups_env_path, f'schain-{schain_name}.json')
diff --git a/core/schains/config/node_info.py b/core/schains/config/node_info.py
index e4bb3386b..f285336bd 100644
--- a/core/schains/config/node_info.py
+++ b/core/schains/config/node_info.py
@@ -81,7 +81,7 @@ def generate_current_node_info(
sync_node: bool = False, archive: bool = False, catchup: bool = False
) -> CurrentNodeInfo:
wallets = generate_wallets_config(
- schain['name'],
+ schain.name,
rotation_id,
sync_node,
nodes_in_schain,
diff --git a/core/schains/config/predeployed.py b/core/schains/config/predeployed.py
index 43e85997b..eddddb343 100644
--- a/core/schains/config/predeployed.py
+++ b/core/schains/config/predeployed.py
@@ -19,6 +19,7 @@
import logging
+from skale.dataclasses.schain_options import AllocationType
from skale.wallets.web3_wallet import public_key_to_address
from etherbase_predeployed import (
@@ -60,6 +61,7 @@
def generate_predeployed_accounts(
schain_name: str,
schain_type: SchainType,
+ allocation_type: AllocationType,
schain_nodes: list,
on_chain_owner: str,
mainnet_owner: str,
@@ -80,6 +82,7 @@ def generate_predeployed_accounts(
if generation >= Gen.ONE:
v1_predeployed_contracts = generate_v1_predeployed_contracts(
schain_type=schain_type,
+ allocation_type=allocation_type,
on_chain_owner=on_chain_owner,
mainnet_owner=mainnet_owner,
originator_address=originator_address,
@@ -94,6 +97,7 @@ def generate_predeployed_accounts(
def generate_v1_predeployed_contracts(
schain_type: SchainType,
+ allocation_type: AllocationType,
on_chain_owner: str,
mainnet_owner: str,
originator_address: str,
@@ -127,7 +131,7 @@ def generate_v1_predeployed_contracts(
ima=message_proxy_for_schain_address,
)
- allocated_storage = get_fs_allocated_storage(schain_type)
+ allocated_storage = get_fs_allocated_storage(schain_type, allocation_type)
filestorage_generator = UpgradeableFileStorageGenerator()
filestorage_predeployed = filestorage_generator.generate_allocation(
contract_address=FILESTORAGE_ADDRESS,
diff --git a/core/schains/config/schain_info.py b/core/schains/config/schain_info.py
index d6a5bab83..22fc89d40 100644
--- a/core/schains/config/schain_info.py
+++ b/core/schains/config/schain_info.py
@@ -19,7 +19,7 @@
from dataclasses import dataclass
-from core.schains.limits import get_schain_limit, get_schain_type
+from core.schains.limits import get_allocation_type_name, get_schain_limit, get_schain_type
from core.schains.types import MetricType
from tools.configs.schains import MAX_CONSENSUS_STORAGE_INF_VALUE
@@ -75,23 +75,24 @@ def generate_schain_info(
sync_node: bool,
archive: bool
) -> SChainInfo:
- schain_type = get_schain_type(schain['partOfNode'])
- volume_limits = get_schain_limit(schain_type, MetricType.volume_limits)
+ schain_type = get_schain_type(schain.part_of_node)
+ allocation_type_name = get_allocation_type_name(schain.options.allocation_type)
+ volume_limits = get_schain_limit(schain_type, MetricType.volume_limits)[allocation_type_name]
if sync_node and archive:
volume_limits['max_consensus_storage_bytes'] = MAX_CONSENSUS_STORAGE_INF_VALUE
- leveldb_limits = get_schain_limit(schain_type, MetricType.leveldb_limits)
+ leveldb_limits = get_schain_limit(schain_type, MetricType.leveldb_limits)[allocation_type_name]
contract_storage_limit = leveldb_limits['contract_storage']
db_storage_limit = leveldb_limits['db_storage']
return SChainInfo(
schain_id=schain_id,
- name=schain['name'],
+ name=schain.name,
block_author=on_chain_etherbase,
contract_storage_limit=contract_storage_limit,
db_storage_limit=db_storage_limit,
node_groups=node_groups,
nodes=nodes,
- multitransaction_mode=schain['multitransactionMode'],
+ multitransaction_mode=schain.options.multitransaction_mode,
static_schain_info=static_schain_info,
**volume_limits
)
diff --git a/core/schains/config/skale_section.py b/core/schains/config/skale_section.py
index 340644be1..452a08007 100644
--- a/core/schains/config/skale_section.py
+++ b/core/schains/config/skale_section.py
@@ -58,9 +58,9 @@ def generate_skale_section(
schain_nodes=schain_nodes_with_schains
)
- schain_type = get_schain_type(schain['partOfNode'])
+ schain_type = get_schain_type(schain.part_of_node)
static_node_info = get_static_node_info(schain_type)
- static_schain_info = get_static_schain_info(schain['name'])
+ static_schain_info = get_static_schain_info(schain.name)
nodes_in_schain = len(schain_nodes_with_schains)
node_info = generate_current_node_info(
@@ -81,7 +81,7 @@ def generate_skale_section(
schain_nodes = generate_schain_nodes(
schain_nodes_with_schains=schain_nodes_with_schains,
- schain_name=schain['name'],
+ schain_name=schain.name,
rotation_id=rotation_id,
sync_node=sync_node
)
@@ -100,5 +100,5 @@ def generate_skale_section(
return SkaleConfig(
contract_settings=contract_settings,
node_info=node_info,
- schain_info=schain_info,
+ schain_info=schain_info
)
diff --git a/core/schains/config/static_params.py b/core/schains/config/static_params.py
index 83d140c12..62e83761d 100644
--- a/core/schains/config/static_params.py
+++ b/core/schains/config/static_params.py
@@ -21,13 +21,15 @@
from core.schains.config.helper import get_static_params
from tools.configs import ENV_TYPE
+from typing import Optional
+
def get_static_schain_cmd(env_type: str = ENV_TYPE) -> list:
static_params = get_static_params(env_type)
return static_params['schain_cmd']
-def get_static_schain_info(schain_name: str, env_type: str = ENV_TYPE) -> dict | None:
+def get_static_schain_info(schain_name: str, env_type: str = ENV_TYPE) -> Optional[dict]:
static_params = get_static_params(env_type)
static_params_schain = static_params['schain']
processed_params = {}
@@ -36,7 +38,7 @@ def get_static_schain_info(schain_name: str, env_type: str = ENV_TYPE) -> dict |
return processed_params
-def get_schain_static_param(static_param_schain: dict | int, schain_name: str) -> int:
+def get_schain_static_param(static_param_schain: dict, schain_name: str) -> int:
if isinstance(static_param_schain, int):
return static_param_schain
elif isinstance(static_param_schain, dict) and schain_name in static_param_schain:
diff --git a/core/schains/info.py b/core/schains/info.py
index ee0ec0ae3..0046b3980 100644
--- a/core/schains/info.py
+++ b/core/schains/info.py
@@ -47,8 +47,8 @@ def get_schain_info_by_name(skale: Skale, schain_name: str) -> SchainData:
return SchainData(
schain_name,
sid,
- contracts_info['mainnetOwner'],
- contracts_info['partOfNode'],
+ contracts_info.mainnet_owner,
+ contracts_info.part_of_node,
record.dkg_status,
record.is_deleted,
record.first_run,
diff --git a/core/schains/limits.py b/core/schains/limits.py
index e7b555d50..a2db93293 100644
--- a/core/schains/limits.py
+++ b/core/schains/limits.py
@@ -17,6 +17,9 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+from typing import TypedDict
+from skale.dataclasses.schain_options import AllocationType
+
from core.schains.types import SchainType, ContainerType, MetricType
from tools.helper import read_json
from tools.configs.resource_allocation import (
@@ -33,8 +36,12 @@ def get_schain_type(schain_part_of_node: int) -> SchainType:
return SchainType(schain_part_of_node)
+def get_allocation_type_name(allocation_type: AllocationType) -> str:
+ return allocation_type.name.lower()
+
+
def get_limit(metric_type: MetricType, schain_type: SchainType, container_type: ContainerType,
- resource_allocation: dict) -> int:
+ resource_allocation: TypedDict) -> TypedDict:
"""
Get allocation option from the resources allocation file
@@ -53,7 +60,7 @@ def get_limit(metric_type: MetricType, schain_type: SchainType, container_type:
return resource_allocation[container_type.name][metric_type.name][schain_type.name]
-def get_schain_limit(schain_type: SchainType, metric_type: MetricType) -> int:
+def get_schain_limit(schain_type: SchainType, metric_type: MetricType) -> TypedDict:
alloc = _get_resource_allocation_info()
return get_limit(metric_type, schain_type, ContainerType.schain, alloc)
@@ -63,8 +70,9 @@ def get_ima_limit(schain_type: SchainType, metric_type: MetricType) -> int:
return get_limit(metric_type, schain_type, ContainerType.ima, alloc)
-def get_fs_allocated_storage(schain_type: SchainType) -> str:
- volume_limits = get_schain_limit(schain_type, MetricType.volume_limits)
+def get_fs_allocated_storage(schain_type: SchainType, allocation_type: AllocationType) -> str:
+ allocation_type_name = get_allocation_type_name(allocation_type)
+ volume_limits = get_schain_limit(schain_type, MetricType.volume_limits)[allocation_type_name]
return volume_limits[FILESTORAGE_LIMIT_OPTION_NAME]
diff --git a/core/schains/monitor/action.py b/core/schains/monitor/action.py
index fb09d98c5..75dd0d5d1 100644
--- a/core/schains/monitor/action.py
+++ b/core/schains/monitor/action.py
@@ -35,20 +35,18 @@
run_dkg,
save_dkg_results
)
-from core.schains.ima import get_migration_ts as get_ima_migration_ts
from core.schains.cleaner import (
remove_ima_container,
remove_schain_container,
remove_schain_volume
)
+from core.schains.ima import get_migration_ts as get_ima_migration_ts, ImaData
+from core.schains.status import NodeCliStatus
from core.schains.firewall.types import IRuleController
-
from core.schains.volume import init_data_volume
from core.schains.exit_scheduler import ExitScheduleFileManager
-
from core.schains.limits import get_schain_type
-
from core.schains.monitor.containers import monitor_schain_container, monitor_ima_container
from core.schains.monitor.rpc import handle_failed_schain_rpc
from core.schains.runner import (
@@ -70,9 +68,8 @@
get_node_ips_from_config,
get_own_ip_from_config
)
-from core.schains.ima import ImaData
from core.schains.external_config import ExternalConfig, ExternalState
-from core.schains.skaled_status import init_skaled_status
+from core.schains.status import init_skaled_status
from core.schains.ssl import update_ssl_change_date
from tools.configs import SYNC_NODE
@@ -153,7 +150,7 @@ def __init__(
):
self.skale = skale
self.schain = schain
- self.generation = schain['generation']
+ self.generation = schain.generation
self.node_config = node_config
self.checks = checks
self.stream_version = stream_version
@@ -162,13 +159,13 @@ def __init__(
self.rotation_data = rotation_data
self.rotation_id = rotation_data['rotation_id']
self.estate = estate
- self.econfig = econfig or ExternalConfig(name=schain['name'])
+ self.econfig = econfig or ExternalConfig(name=schain.name)
self.node_options = node_options or NodeOptions()
self.cfm: ConfigFileManager = ConfigFileManager(
- schain_name=self.schain['name']
+ schain_name=self.schain.name
)
self.statsd_client = get_statsd_client()
- super().__init__(name=schain['name'])
+ super().__init__(name=schain.name)
@BaseActionManager.monitor_block
def config_dir(self) -> bool:
@@ -215,7 +212,7 @@ def dkg(self) -> bool:
def upstream_config(self) -> bool:
with self.statsd_client.timer(f'admin.action.upstream_config.{no_hyphens(self.name)}'):
logger.info(
- 'Creating new upstream_config rotation_id: %s, stream: %s',
+ 'Generating new upstream_config rotation_id: %s, stream: %s',
self.rotation_data.get('rotation_id'), self.stream_version
)
new_config = create_new_upstream_config(
@@ -232,6 +229,7 @@ def upstream_config(self) -> bool:
result = False
if not self.cfm.upstream_config_exists() or \
new_config != self.cfm.latest_upstream_config:
+ logger.info('Saving new config')
rotation_id = self.rotation_data['rotation_id']
logger.info(
'Saving new upstream config rotation_id: %d, ips: %s',
@@ -299,30 +297,32 @@ def __init__(
rule_controller: IRuleController,
checks: SkaledChecks,
node_config: NodeConfig,
+ ncli_status: NodeCliStatus,
econfig: Optional[ExternalConfig] = None,
dutils: DockerUtils = None,
node_options: NodeOptions = None
):
self.schain = schain
- self.generation = schain['generation']
+ self.generation = schain.generation
self.checks = checks
self.node_config = node_config
self.rc = rule_controller
- self.skaled_status = init_skaled_status(self.schain['name'])
- self.schain_type = get_schain_type(schain['partOfNode'])
- self.econfig = econfig or ExternalConfig(schain['name'])
+ self.skaled_status = init_skaled_status(self.schain.name)
+ self.schain_type = get_schain_type(schain.part_of_node)
+ self.econfig = econfig or ExternalConfig(schain.name)
self.cfm: ConfigFileManager = ConfigFileManager(
- schain_name=self.schain['name']
+ schain_name=self.schain.name
)
- self.esfm = ExitScheduleFileManager(schain['name'])
+ self.esfm = ExitScheduleFileManager(schain.name)
self.dutils = dutils or DockerUtils()
self.statsd_client = get_statsd_client()
self.node_options = node_options or NodeOptions()
+ self.ncli_status = ncli_status
- super().__init__(name=schain['name'])
+ super().__init__(name=schain.name)
@BaseActionManager.monitor_block
def volume(self) -> bool:
@@ -375,11 +375,13 @@ def skaled_container(
download_snapshot,
start_ts
)
+ snapshot_from = self.ncli_status.snapshot_from if self.ncli_status else None
monitor_schain_container(
self.schain,
schain_record=self.schain_record,
skaled_status=self.skaled_status,
download_snapshot=download_snapshot,
+ snapshot_from=snapshot_from,
start_ts=start_ts,
abort_on_exit=abort_on_exit,
dutils=self.dutils,
@@ -556,4 +558,11 @@ def notify_repair_mode(self) -> None:
@BaseActionManager.monitor_block
def disable_repair_mode(self) -> None:
logger.info('Switching off repair mode')
- self.schain_record.set_repair_mode(False)
+ if self.schain_record.repair_mode:
+ self.schain_record.set_repair_mode(False)
+
+ @BaseActionManager.monitor_block
+ def update_repair_ts(self, new_ts: int) -> None:
+ logger.info('Setting repair_ts to %d', new_ts)
+ new_dt = datetime.utcfromtimestamp(new_ts)
+ self.schain_record.set_repair_date(new_dt)
diff --git a/core/schains/monitor/config_monitor.py b/core/schains/monitor/config_monitor.py
index 47587a1bc..639689870 100644
--- a/core/schains/monitor/config_monitor.py
+++ b/core/schains/monitor/config_monitor.py
@@ -45,6 +45,8 @@ def run(self):
self.execute()
self.am.log_executed_blocks()
self.am._upd_last_seen()
+ except Exception as e:
+ logger.info('Config monitor type failed %s', typename, exc_info=e)
finally:
logger.info('Config monitor type finished %s', typename)
diff --git a/core/schains/monitor/containers.py b/core/schains/monitor/containers.py
index 001121395..d3c2142cd 100644
--- a/core/schains/monitor/containers.py
+++ b/core/schains/monitor/containers.py
@@ -20,6 +20,7 @@
import logging
import time
from typing import Optional
+from skale.contracts.manager.schains import SchainStructure
from core.schains.volume import is_volume_exists
from core.schains.runner import (
@@ -50,39 +51,40 @@
def monitor_schain_container(
- schain,
+ schain: SchainStructure,
schain_record,
skaled_status,
download_snapshot=False,
start_ts=None,
+ snapshot_from: Optional[str] = None,
abort_on_exit: bool = True,
dutils: Optional[DockerUtils] = None,
sync_node: bool = False,
historic_state: bool = False
) -> None:
dutils = dutils or DockerUtils()
- schain_name = schain['name']
- logger.info(f'Monitoring container for sChain {schain_name}')
+ # NOTE(review): former 'schain_name' alias removed; schain.name is used directly below
+ logger.info(f'Monitoring container for sChain {schain.name}')
- if not is_volume_exists(schain_name, sync_node=sync_node, dutils=dutils):
- logger.error(f'Data volume for sChain {schain_name} does not exist')
+ if not is_volume_exists(schain.name, sync_node=sync_node, dutils=dutils):
+ logger.error(f'Data volume for sChain {schain.name} does not exist')
return
if skaled_status.exit_time_reached and abort_on_exit:
logger.info(
- f'{schain_name} - Skipping container monitor: exit time reached')
+ f'{schain.name} - Skipping container monitor: exit time reached')
skaled_status.log()
schain_record.reset_failed_counters()
return
- if not is_container_exists(schain_name, dutils=dutils):
- logger.info(f'SChain {schain_name}: container doesn\'t exits')
+ if not is_container_exists(schain.name, dutils=dutils):
+ logger.info(f'SChain {schain.name}: container doesn\'t exits')
run_schain_container(
schain=schain,
download_snapshot=download_snapshot,
start_ts=start_ts,
dutils=dutils,
- snapshot_from=schain_record.snapshot_from,
+ snapshot_from=snapshot_from,
sync_node=sync_node,
historic_state=historic_state,
)
@@ -92,14 +94,14 @@ def monitor_schain_container(
if skaled_status.clear_data_dir and skaled_status.start_from_snapshot:
logger.info(
- f'{schain_name} - Skipping container monitor: sChain should be repaired')
+ f'{schain.name} - Skipping container monitor: sChain should be repaired')
skaled_status.log()
schain_record.reset_failed_counters()
return
- if is_schain_container_failed(schain_name, dutils=dutils):
+ if is_schain_container_failed(schain.name, dutils=dutils):
if schain_record.restart_count < MAX_SCHAIN_RESTART_COUNT:
- logger.info('sChain %s: restarting container', schain_name)
+ logger.info('sChain %s: restarting container', schain.name)
restart_container(SCHAIN_CONTAINER, schain, dutils=dutils)
update_ssl_change_date(schain_record)
schain_record.set_restart_count(schain_record.restart_count + 1)
@@ -107,7 +109,7 @@ def monitor_schain_container(
else:
logger.warning(
'SChain %s: max restart count exceeded - %d',
- schain_name,
+ schain.name,
MAX_SCHAIN_RESTART_COUNT
)
else:
@@ -120,42 +122,41 @@ def monitor_ima_container(
migration_ts: int = 0,
dutils: DockerUtils = None
) -> None:
- schain_name = schain["name"]
if SYNC_NODE:
return
if not ima_data.linked:
- logger.info(f'{schain_name} - not registered in IMA, skipping')
+ logger.info(f'{schain.name} - not registered in IMA, skipping')
return
- copy_schain_ima_abi(schain_name)
+ copy_schain_ima_abi(schain.name)
container_exists = is_container_exists(
- schain_name, container_type=IMA_CONTAINER, dutils=dutils)
+ schain.name, container_type=IMA_CONTAINER, dutils=dutils)
if time.time() > migration_ts:
logger.debug('IMA migration time passed')
image = get_image_name(image_type=IMA_CONTAINER, new=True)
- time_frame = get_ima_time_frame(schain_name, after=True)
+ time_frame = get_ima_time_frame(schain.name, after=True)
if container_exists:
- container_image = get_container_image(schain_name, IMA_CONTAINER, dutils)
- container_time_frame = get_ima_container_time_frame(schain_name, dutils)
+ container_image = get_container_image(schain.name, IMA_CONTAINER, dutils)
+ container_time_frame = get_ima_container_time_frame(schain.name, dutils)
if image != container_image or time_frame != container_time_frame:
logger.info('Removing old container as part of IMA migration')
- remove_container(schain_name, IMA_CONTAINER, dutils)
+ remove_container(schain.name, IMA_CONTAINER, dutils)
container_exists = False
else:
- time_frame = get_ima_time_frame(schain_name, after=False)
+ time_frame = get_ima_time_frame(schain.name, after=False)
image = get_image_name(image_type=IMA_CONTAINER, new=False)
logger.debug('IMA time frame %d', time_frame)
if not container_exists:
logger.info(
'%s No IMA container, creating, image %s, time frame %d',
- schain_name, image, time_frame
+ schain.name, image, time_frame
)
run_ima_container(
schain,
@@ -166,4 +167,4 @@ def monitor_ima_container(
)
else:
logger.debug(
- 'sChain %s: IMA container exists, but not running, skipping', schain_name)
+ 'sChain %s: IMA container exists, but not running, skipping', schain.name)
diff --git a/core/schains/monitor/main.py b/core/schains/monitor/main.py
index 58010d347..b3c589724 100644
--- a/core/schains/monitor/main.py
+++ b/core/schains/monitor/main.py
@@ -18,66 +18,69 @@
# along with this program. If not, see .
import functools
-import time
-import random
import logging
-from typing import Dict
-from concurrent.futures import Future, ThreadPoolExecutor
+import os
+import time
+from typing import Callable, Optional
from importlib import reload
-from typing import List, Optional
from skale import Skale, SkaleIma
+from skale.contracts.manager.schains import SchainStructure
from web3._utils import request as web3_request
from core.node import get_skale_node_version
from core.node_config import NodeConfig
from core.schains.checks import ConfigChecks, get_api_checks_status, TG_ALLOWED_CHECKS, SkaledChecks
from core.schains.config.file_manager import ConfigFileManager
+from core.schains.config.static_params import get_automatic_repair_option
from core.schains.firewall import get_default_rule_controller
from core.schains.firewall.utils import get_sync_agent_ranges
+from core.schains.external_config import ExternalConfig, ExternalState
from core.schains.monitor import get_skaled_monitor, RegularConfigMonitor, SyncConfigMonitor
from core.schains.monitor.action import ConfigActionManager, SkaledActionManager
-from core.schains.external_config import ExternalConfig, ExternalState
-from core.schains.task import keep_tasks_running, Task
-from core.schains.config.static_params import get_automatic_repair_option
-from core.schains.skaled_status import get_skaled_status
+from core.schains.monitor.tasks import execute_tasks, Future, ITask
+from core.schains.process import ProcessReport
+from core.schains.status import get_node_cli_status, get_skaled_status
from core.node import get_current_nodes
from tools.docker_utils import DockerUtils
from tools.configs import SYNC_NODE
+from tools.configs.schains import DKG_TIMEOUT_COEFFICIENT
from tools.notifications.messages import notify_checks
from tools.helper import is_node_part_of_chain, no_hyphens
from tools.resources import get_statsd_client
-from web.models.schain import SChainRecord
+from web.models.schain import SChainRecord, upsert_schain_record
-MIN_SCHAIN_MONITOR_SLEEP_INTERVAL = 20
-MAX_SCHAIN_MONITOR_SLEEP_INTERVAL = 40
+logger = logging.getLogger(__name__)
-SKALED_PIPELINE_SLEEP = 2
-CONFIG_PIPELINE_SLEEP = 3
-logger = logging.getLogger(__name__)
+class NoTasksToRunError(Exception):
+ pass
def run_config_pipeline(
- skale: Skale, skale_ima: SkaleIma, schain: Dict, node_config: NodeConfig, stream_version: str
+ schain_name: str,
+ skale: Skale,
+ skale_ima: SkaleIma,
+ node_config: NodeConfig,
+ stream_version: str,
) -> None:
- name = schain['name']
- schain_record = SChainRecord.get_by_name(name)
- rotation_data = skale.node_rotation.get_rotation(name)
+ schain = skale.schains.get_by_name(schain_name)
+ schain_record = SChainRecord.get_by_name(schain_name)
+ rotation_data = skale.node_rotation.get_rotation(schain_name)
allowed_ranges = get_sync_agent_ranges(skale)
- ima_linked = not SYNC_NODE and skale_ima.linker.has_schain(name)
- group_index = skale.schains.name_to_group_id(name)
+ ima_linked = not SYNC_NODE and skale_ima.linker.has_schain(schain_name)
+ group_index = skale.schains.name_to_group_id(schain_name)
last_dkg_successful = skale.dkg.is_last_dkg_successful(group_index)
- current_nodes = get_current_nodes(skale, name)
+ current_nodes = get_current_nodes(skale, schain_name)
estate = ExternalState(
ima_linked=ima_linked, chain_id=skale_ima.web3.eth.chain_id, ranges=allowed_ranges
)
- econfig = ExternalConfig(name)
+ econfig = ExternalConfig(schain_name)
config_checks = ConfigChecks(
- schain_name=name,
+ schain_name=schain_name,
node_id=node_config.id,
schain_record=schain_record,
stream_version=stream_version,
@@ -105,9 +108,7 @@ def run_config_pipeline(
if SYNC_NODE:
logger.info(
- 'Sync node last_dkg_successful %s, rotation_data %s',
- last_dkg_successful,
- rotation_data
+ 'Sync node last_dkg_successful %s, rotation_data %s', last_dkg_successful, rotation_data
)
mon = SyncConfigMonitor(config_am, config_checks)
else:
@@ -115,86 +116,207 @@ def run_config_pipeline(
mon = RegularConfigMonitor(config_am, config_checks)
statsd_client = get_statsd_client()
- statsd_client.incr(f'admin.config_pipeline.{mon.__class__.__name__}.{no_hyphens(name)}')
+ statsd_client.incr(f'admin.config_pipeline.{mon.__class__.__name__}.{no_hyphens(schain_name)}')
statsd_client.gauge(
- f'admin.config_pipeline.rotation_id.{no_hyphens(name)}', rotation_data['rotation_id']
+ f'admin.config_pipeline.rotation_id.{no_hyphens(schain_name)}', rotation_data['rotation_id']
)
- with statsd_client.timer(f'admin.config_pipeline.duration.{no_hyphens(name)}'):
+ with statsd_client.timer(f'admin.config_pipeline.duration.{no_hyphens(schain_name)}'):
mon.run()
def run_skaled_pipeline(
- skale: Skale, schain: Dict, node_config: NodeConfig, dutils: DockerUtils
+ schain_name: str, skale: Skale, node_config: NodeConfig, dutils: DockerUtils
) -> None:
- name = schain['name']
- schain_record = SChainRecord.get_by_name(name)
+ schain = skale.schains.get_by_name(schain_name)
+ schain_record = SChainRecord.get_by_name(schain_name)
+
logger.info('Record: %s', SChainRecord.to_dict(schain_record))
dutils = dutils or DockerUtils()
- rc = get_default_rule_controller(name=name)
+ rc = get_default_rule_controller(name=schain_name)
skaled_checks = SkaledChecks(
- schain_name=schain['name'],
+ schain_name=schain.name,
schain_record=schain_record,
rule_controller=rc,
dutils=dutils,
sync_node=SYNC_NODE,
)
- skaled_status = get_skaled_status(name)
+ skaled_status = get_skaled_status(schain_name)
+ ncli_status = get_node_cli_status(schain_name)
skaled_am = SkaledActionManager(
schain=schain,
rule_controller=rc,
checks=skaled_checks,
node_config=node_config,
- econfig=ExternalConfig(name),
+ ncli_status=ncli_status,
+ econfig=ExternalConfig(schain_name),
dutils=dutils,
)
- status = skaled_checks.get_all(log=False, expose=True)
+ check_status = skaled_checks.get_all(log=False, expose=True)
automatic_repair = get_automatic_repair_option()
- api_status = get_api_checks_status(status=status, allowed=TG_ALLOWED_CHECKS)
- notify_checks(name, node_config.all(), api_status)
+ api_status = get_api_checks_status(status=check_status, allowed=TG_ALLOWED_CHECKS)
+ notify_checks(schain_name, node_config.all(), api_status)
- logger.info('Skaled status: %s', status)
+ logger.info('Skaled check status: %s', check_status)
logger.info('Upstream config %s', skaled_am.upstream_config_path)
mon = get_skaled_monitor(
action_manager=skaled_am,
- status=status,
+ check_status=check_status,
schain_record=schain_record,
skaled_status=skaled_status,
+ ncli_status=ncli_status,
automatic_repair=automatic_repair,
)
statsd_client = get_statsd_client()
- statsd_client.incr(f'admin.skaled_pipeline.{mon.__name__}.{no_hyphens(name)}')
- with statsd_client.timer(f'admin.skaled_pipeline.duration.{no_hyphens(name)}'):
+ statsd_client.incr(f'admin.skaled_pipeline.{mon.__name__}.{no_hyphens(schain_name)}')
+ with statsd_client.timer(f'admin.skaled_pipeline.duration.{no_hyphens(schain_name)}'):
mon(skaled_am, skaled_checks).run()
-def post_monitor_sleep():
- schain_monitor_sleep = random.randint(
- MIN_SCHAIN_MONITOR_SLEEP_INTERVAL, MAX_SCHAIN_MONITOR_SLEEP_INTERVAL
- )
- logger.info('Monitor iteration completed, sleeping for %d', schain_monitor_sleep)
- time.sleep(schain_monitor_sleep)
+class SkaledTask(ITask):
+ NAME = 'skaled'
+ STUCK_TIMEOUT_SECONDS = 3600 # 1 hour
+
+ def __init__(
+ self,
+ schain_name: str,
+ skale: Skale,
+ node_config: NodeConfig,
+ stream_version: str,
+ dutils: Optional[DockerUtils] = None,
+ ) -> None:
+ self.schain_name = schain_name
+ self.skale = skale
+ self.node_config = node_config
+ self.dutils = dutils
+ self._future = Future()
+ self._start_ts = 0
+ self.stream_version = stream_version
+
+ @property
+ def name(self) -> str:
+ return self.NAME
+
+ @property
+ def stuck_timeout(self) -> int:
+ return self.STUCK_TIMEOUT_SECONDS
+
+ @property
+ def future(self) -> Future:
+ return self._future
+
+ @future.setter
+ def future(self, value: Future) -> None:
+ self._future = value
+
+ @property
+ def start_ts(self) -> int:
+ return self._start_ts
+
+ @start_ts.setter
+ def start_ts(self, value: int) -> None:
+ self._start_ts = value
+
+ @property
+ def needed(self) -> bool:
+ schain_record = upsert_schain_record(self.schain_name)
+ return schain_record.config_version == self.stream_version and (
+ not schain_record.sync_config_run or not schain_record.first_run
+ )
+ def create_pipeline(self) -> Callable:
+ return functools.partial(
+ run_skaled_pipeline,
+ schain_name=self.schain_name,
+ skale=self.skale,
+ node_config=self.node_config,
+ dutils=self.dutils,
+ )
-def create_and_execute_tasks(
- skale,
- schain,
+
+class ConfigTask(ITask):
+ NAME = 'config'
+ STUCK_TIMEOUT_SECONDS = 60 * 60 * 2
+
+ def __init__(
+ self,
+ schain_name: str,
+ skale: Skale,
+ skale_ima: SkaleIma,
+ node_config: NodeConfig,
+ stream_version: str,
+ ) -> None:
+ self.schain_name = schain_name
+ self.skale = skale
+ self.skale_ima = skale_ima
+ self.node_config = node_config
+ self.stream_version = stream_version
+ self._start_ts = 0
+ self._future = Future()
+
+ @property
+ def name(self) -> str:
+ return self.NAME
+
+ @property
+ def future(self) -> Future:
+ return self._future
+
+ @future.setter
+ def future(self, value: Future) -> None:
+ self._future = value
+
+ @property
+ def stuck_timeout(self) -> int:
+ dkg_timeout = self.skale.constants_holder.get_dkg_timeout()
+ return int(dkg_timeout * DKG_TIMEOUT_COEFFICIENT)
+
+ @property
+ def start_ts(self) -> int:
+ return self._start_ts
+
+ @start_ts.setter
+ def start_ts(self, value: int) -> None:
+ self._start_ts = value
+
+ @property
+ def needed(self) -> bool:
+ return SYNC_NODE or is_node_part_of_chain(self.skale, self.schain_name, self.node_config.id)
+
+ def create_pipeline(self) -> Callable:
+ return functools.partial(
+ run_config_pipeline,
+ schain_name=self.schain_name,
+ skale=self.skale,
+ skale_ima=self.skale_ima,
+ node_config=self.node_config,
+ stream_version=self.stream_version,
+ )
+
+
+def start_tasks(
+ skale: Skale,
+ schain: SchainStructure,
node_config: NodeConfig,
skale_ima: SkaleIma,
- stream_version,
- schain_record,
- executor,
- futures,
- dutils,
-):
+ dutils: Optional[DockerUtils] = None,
+) -> bool:
reload(web3_request)
- name = schain['name']
+
+ name = schain.name
+ init_ts, pid = int(time.time()), os.getpid()
+ logger.info('Initializing process report %d %d', pid, init_ts)
+ process_report = ProcessReport(name)
+ process_report.update(pid, init_ts)
+
+ stream_version = get_skale_node_version()
+ schain_record = upsert_schain_record(name)
is_rotation_active = skale.node_rotation.is_rotation_active(name)
@@ -215,74 +337,26 @@ def create_and_execute_tasks(
statsd_client.incr(f'admin.schain.monitor.{no_hyphens(name)}')
statsd_client.gauge(f'admin.schain.monitor_last_seen.{no_hyphens(name)}', monitor_last_seen_ts)
- tasks = []
- if not leaving_chain:
- logger.info('Adding config task to the pool')
- tasks.append(
- Task(
- f'{name}-config',
- functools.partial(
- run_config_pipeline,
- skale=skale,
- skale_ima=skale_ima,
- schain=schain,
- node_config=node_config,
- stream_version=stream_version,
- ),
- sleep=CONFIG_PIPELINE_SLEEP,
- )
- )
if schain_record.config_version != stream_version or (
schain_record.sync_config_run and schain_record.first_run
):
+ logger.info('Fetching upstream config requested. Removing the old skaled config')
ConfigFileManager(name).remove_skaled_config()
- else:
- logger.info('Adding skaled task to the pool')
- tasks.append(
- Task(
- f'{name}-skaled',
- functools.partial(
- run_skaled_pipeline,
- skale=skale,
- schain=schain,
- node_config=node_config,
- dutils=dutils,
- ),
- sleep=SKALED_PIPELINE_SLEEP,
- )
- )
- if len(tasks) == 0:
- logger.warning('No tasks to run')
- keep_tasks_running(executor, tasks, futures)
-
-
-def run_monitor_for_schain(
- skale, skale_ima, node_config: NodeConfig, schain, dutils=None, once=False
-):
- stream_version = get_skale_node_version()
- tasks_number = 2
- with ThreadPoolExecutor(max_workers=tasks_number, thread_name_prefix='T') as executor:
- futures: List[Optional[Future]] = [None for i in range(tasks_number)]
- while True:
- schain_record = SChainRecord.get_by_name(schain['name'])
- try:
- create_and_execute_tasks(
- skale,
- schain,
- node_config,
- skale_ima,
- stream_version,
- schain_record,
- executor,
- futures,
- dutils,
- )
- if once:
- return True
- post_monitor_sleep()
- except Exception:
- logger.exception('Monitor iteration failed')
- if once:
- return False
- post_monitor_sleep()
+ tasks = [
+ ConfigTask(
+ schain_name=schain.name,
+ skale=skale,
+ skale_ima=skale_ima,
+ node_config=node_config,
+ stream_version=stream_version,
+ ),
+ SkaledTask(
+ schain_name=schain.name,
+ skale=skale,
+ node_config=node_config,
+ stream_version=stream_version,
+ dutils=dutils
+ ),
+ ]
+ execute_tasks(tasks=tasks, process_report=process_report)
diff --git a/core/schains/monitor/rpc.py b/core/schains/monitor/rpc.py
index 4632ce918..5d9564221 100644
--- a/core/schains/monitor/rpc.py
+++ b/core/schains/monitor/rpc.py
@@ -19,6 +19,8 @@
import logging
+from skale.contracts.manager.schains import SchainStructure
+
from core.schains.runner import restart_container
from core.schains.runner import is_container_exists, is_container_running
from tools.docker_utils import DockerUtils
@@ -33,31 +35,30 @@
def handle_failed_schain_rpc(
- schain,
+ schain: SchainStructure,
schain_record,
skaled_status,
dutils=None
):
dutils = dutils or DockerUtils()
- schain_name = schain['name']
- logger.info(f'Monitoring RPC for sChain {schain_name}')
+ logger.info(f'Monitoring RPC for sChain {schain.name}')
- if not is_container_exists(schain_name, dutils=dutils):
- logger.warning(f'{schain_name} RPC monitor failed: container doesn\'t exit')
+ if not is_container_exists(schain.name, dutils=dutils):
+ logger.warning(f'{schain.name} RPC monitor failed: container doesn\'t exist')
return
- if not is_container_running(schain_name, dutils=dutils):
- logger.warning(f'{schain_name} RPC monitor failed: container is not running')
+ if not is_container_running(schain.name, dutils=dutils):
+ logger.warning(f'{schain.name} RPC monitor failed: container is not running')
return
if skaled_status.exit_time_reached:
- logger.info(f'{schain_name} - Skipping RPC monitor: exit time reached')
+ logger.info(f'{schain.name} - Skipping RPC monitor: exit time reached')
skaled_status.log()
schain_record.set_failed_rpc_count(0)
return
if skaled_status.downloading_snapshot:
- logger.info(f'{schain_name} - Skipping RPC monitor: downloading snapshot')
+ logger.info(f'{schain.name} - Skipping RPC monitor: downloading snapshot')
skaled_status.log()
schain_record.set_failed_rpc_count(0)
return
@@ -65,18 +66,18 @@ def handle_failed_schain_rpc(
rpc_stuck = schain_record.failed_rpc_count > MAX_SCHAIN_FAILED_RPC_COUNT
logger.info(
'SChain %s, rpc stuck: %s, failed_rpc_count: %d, restart_count: %d',
- schain_name,
+ schain.name,
rpc_stuck,
schain_record.failed_rpc_count,
schain_record.restart_count
)
if rpc_stuck:
if schain_record.restart_count < MAX_SCHAIN_RESTART_COUNT:
- logger.info(f'SChain {schain_name}: restarting container')
+ logger.info(f'SChain {schain.name}: restarting container')
restart_container(SCHAIN_CONTAINER, schain, dutils=dutils)
schain_record.set_restart_count(schain_record.restart_count + 1)
else:
- logger.warning(f'SChain {schain_name}: max restart count exceeded')
+ logger.warning(f'SChain {schain.name}: max restart count exceeded')
schain_record.set_failed_rpc_count(0)
else:
schain_record.set_failed_rpc_count(schain_record.failed_rpc_count + 1)
diff --git a/core/schains/monitor/skaled_monitor.py b/core/schains/monitor/skaled_monitor.py
index aac40e16a..a0fe5c99b 100644
--- a/core/schains/monitor/skaled_monitor.py
+++ b/core/schains/monitor/skaled_monitor.py
@@ -26,7 +26,7 @@
from core.schains.checks import SkaledChecks
from core.schains.monitor.action import SkaledActionManager
from core.schains.config.main import get_number_of_secret_shares
-from core.schains.skaled_status import SkaledStatus
+from core.schains.status import NodeCliStatus, SkaledStatus
from core.schains.ssl import ssl_reload_needed
from tools.configs import SYNC_NODE
from tools.resources import get_statsd_client
@@ -37,11 +37,7 @@
class BaseSkaledMonitor(IMonitor):
- def __init__(
- self,
- action_manager: SkaledActionManager,
- checks: SkaledChecks
- ) -> None:
+ def __init__(self, action_manager: SkaledActionManager, checks: SkaledChecks) -> None:
self.am = action_manager
self.checks = checks
self.statsd_client = get_statsd_client()
@@ -59,12 +55,13 @@ def run(self):
self.am._upd_schain_record()
self.am.log_executed_blocks()
self.am._upd_last_seen()
+ except Exception as e:
+ logger.info('Skaled monitor type failed %s', typename, exc_info=e)
finally:
logger.info('Skaled monitor type finished %s', typename)
class RegularSkaledMonitor(BaseSkaledMonitor):
-
def execute(self) -> None:
if not self.checks.firewall_rules:
self.am.firewall_rules()
@@ -90,7 +87,7 @@ def execute(self) -> None:
logger.warning(
'Repair mode execution, record: %s, exit_code_ok: %s',
self.checks.schain_record.repair_mode,
- self.checks.exit_code_ok.status
+ self.checks.exit_code_ok.status,
)
self.am.notify_repair_mode()
self.am.cleanup_schain_docker_entity()
@@ -102,7 +99,7 @@ def execute(self) -> None:
self.am.skaled_container(download_snapshot=True)
else:
self.am.reset_restart_count()
- self.am.disable_repair_mode()
+ self.am.update_repair_ts(new_ts=int(time.time()))
class BackupSkaledMonitor(BaseSkaledMonitor):
@@ -223,10 +220,7 @@ def execute(self):
if not self.checks.firewall_rules:
self.am.firewall_rules()
if not self.checks.skaled_container:
- self.am.skaled_container(
- download_snapshot=True,
- start_ts=self.am.finish_ts
- )
+ self.am.skaled_container(download_snapshot=True, start_ts=self.am.finish_ts)
else:
self.am.reset_restart_counter()
if not self.checks.ima_container:
@@ -239,36 +233,34 @@ def is_backup_mode(schain_record: SChainRecord) -> bool:
def is_repair_mode(
schain_record: SChainRecord,
- status: Dict,
+ check_status: Dict,
skaled_status: Optional[SkaledStatus],
- automatic_repair: bool
+ ncli_status: Optional[NodeCliStatus],
+ automatic_repair: bool,
) -> bool:
- if schain_record.repair_mode:
+ repair_ts = int(schain_record.repair_date.timestamp())
+ if ncli_status is not None and ncli_status.repair_ts > repair_ts:
return True
- else:
- return automatic_repair and is_skaled_repair_status(status, skaled_status)
+ return automatic_repair and is_skaled_repair_internal(check_status, skaled_status)
-def is_reload_group_mode(status: Dict, finish_ts: Optional[int]) -> bool:
+def is_reload_group_mode(check_status: Dict, finish_ts: Optional[int]) -> bool:
ts = int(time.time())
if finish_ts is None:
return False
- return finish_ts > ts and status['config'] and not status['config_updated']
+ return finish_ts > ts and check_status['config'] and not check_status['config_updated']
-def is_reload_ip_mode(status: Dict, reload_ts: Optional[int]) -> bool:
+def is_reload_ip_mode(check_status: Dict, reload_ts: Optional[int]) -> bool:
if reload_ts is None:
return False
- return status['config'] and not status['config_updated']
+ return check_status['config'] and not check_status['config_updated']
-def is_config_update_time(
- status: Dict,
- skaled_status: Optional[SkaledStatus]
-) -> bool:
+def is_config_update_time(check_status: Dict, skaled_status: Optional[SkaledStatus]) -> bool:
if not skaled_status:
return False
- return not status['skaled_container'] and skaled_status.exit_time_reached
+ return not check_status['skaled_container'] and skaled_status.exit_time_reached
def is_recreate_mode(status: Dict, schain_record: SChainRecord) -> bool:
@@ -283,24 +275,25 @@ def is_new_node_mode(schain_record: SChainRecord, finish_ts: Optional[int]) -> b
return finish_ts > ts and secret_shares_number == 1
-def is_skaled_repair_status(status: Dict, skaled_status: Optional[SkaledStatus]) -> bool:
+def is_skaled_repair_internal(check_status: Dict, skaled_status: Optional[SkaledStatus]) -> bool:
if skaled_status is None:
return False
skaled_status.log()
needs_repair = skaled_status.clear_data_dir and skaled_status.start_from_snapshot
- return not status['skaled_container'] and needs_repair
+ return not check_status['skaled_container'] and needs_repair
-def no_config(status: Dict) -> bool:
- return not status['config']
+def no_config(check_status: Dict) -> bool:
+ return not check_status['config']
def get_skaled_monitor(
action_manager: SkaledActionManager,
- status: Dict,
+ check_status: Dict,
schain_record: SChainRecord,
skaled_status: SkaledStatus,
- automatic_repair: bool = True
+ ncli_status: NodeCliStatus,
+ automatic_repair: bool = True,
) -> Type[BaseSkaledMonitor]:
logger.info('Choosing skaled monitor')
if skaled_status:
@@ -309,32 +302,32 @@ def get_skaled_monitor(
mon_type: Type[BaseSkaledMonitor] = RegularSkaledMonitor
if SYNC_NODE:
- if no_config(status):
+ if no_config(check_status):
mon_type = NoConfigSkaledMonitor
- if is_recreate_mode(status, schain_record):
+ if is_recreate_mode(check_status, schain_record):
mon_type = RecreateSkaledMonitor
- elif is_config_update_time(status, skaled_status):
+ elif is_config_update_time(check_status, skaled_status):
mon_type = UpdateConfigSkaledMonitor
- elif is_reload_group_mode(status, action_manager.upstream_finish_ts):
+ elif is_reload_group_mode(check_status, action_manager.upstream_finish_ts):
mon_type = ReloadGroupSkaledMonitor
- elif is_reload_ip_mode(status, action_manager.econfig.reload_ts):
+ elif is_reload_ip_mode(check_status, action_manager.econfig.reload_ts):
mon_type = ReloadIpSkaledMonitor
return mon_type
- if no_config(status):
+ if no_config(check_status):
mon_type = NoConfigSkaledMonitor
elif is_backup_mode(schain_record):
mon_type = BackupSkaledMonitor
- elif is_repair_mode(schain_record, status, skaled_status, automatic_repair):
+ elif is_repair_mode(schain_record, check_status, skaled_status, ncli_status, automatic_repair):
mon_type = RepairSkaledMonitor
- elif is_recreate_mode(status, schain_record):
+ elif is_recreate_mode(check_status, schain_record):
mon_type = RecreateSkaledMonitor
elif is_new_node_mode(schain_record, action_manager.finish_ts):
mon_type = NewNodeSkaledMonitor
- elif is_config_update_time(status, skaled_status):
+ elif is_config_update_time(check_status, skaled_status):
mon_type = UpdateConfigSkaledMonitor
- elif is_reload_group_mode(status, action_manager.upstream_finish_ts):
+ elif is_reload_group_mode(check_status, action_manager.upstream_finish_ts):
mon_type = ReloadGroupSkaledMonitor
- elif is_reload_ip_mode(status, action_manager.econfig.reload_ts):
+ elif is_reload_ip_mode(check_status, action_manager.econfig.reload_ts):
mon_type = ReloadIpSkaledMonitor
return mon_type
diff --git a/core/schains/monitor/tasks.py b/core/schains/monitor/tasks.py
new file mode 100644
index 000000000..7fcc86f82
--- /dev/null
+++ b/core/schains/monitor/tasks.py
@@ -0,0 +1,86 @@
+import abc
+import logging
+import time
+from concurrent.futures import Future, ThreadPoolExecutor
+from typing import Callable
+
+from core.schains.process import ProcessReport
+
+
+logger = logging.getLogger(__name__)
+
+
+SLEEP_INTERVAL_SECONDS = 10
+
+
+class ITask(metaclass=abc.ABCMeta):
+ @property
+ @abc.abstractmethod
+ def name(self) -> str:
+ pass
+
+ @property
+ @abc.abstractmethod
+ def stuck_timeout(self) -> int:
+ pass
+
+ @abc.abstractmethod
+ def create_pipeline(self) -> Callable:
+ pass
+
+ @property
+ @abc.abstractmethod
+ def future(self) -> Future:
+ pass
+
+ @future.setter
+ @abc.abstractmethod
+ def future(self, value: Future) -> None:
+ pass
+
+ @property
+ def needed(self) -> bool:
+ pass
+
+ @property
+ @abc.abstractmethod
+ def start_ts(self) -> int:
+ pass
+
+ @start_ts.setter
+ @abc.abstractmethod
+ def start_ts(self, value: int) -> None:
+ pass
+
+
+def execute_tasks(
+ tasks: list[ITask],
+ process_report: ProcessReport,
+ sleep_interval: int = SLEEP_INTERVAL_SECONDS,
+) -> None:
+ logger.info('Running tasks %s', tasks)
+ with ThreadPoolExecutor(max_workers=len(tasks), thread_name_prefix='T') as executor:
+ stucked = []
+ while True:
+ for index, task in enumerate(tasks):
+ if not task.future.running() and task.needed and len(stucked) == 0:
+ task.start_ts = int(time.time())
+ logger.info('Starting task %s at %d', task.name, task.start_ts)
+ pipeline = task.create_pipeline()
+ task.future = executor.submit(pipeline)
+ elif task.future.running():
+ if int(time.time()) - task.start_ts > task.stuck_timeout:
+ logger.info('Canceling future for %s', task.name)
+ canceled = task.future.cancel()
+ if not canceled:
+ logger.warning('Stuck detected for job %s', task.name)
+ task.start_ts = -1
+ stucked.append(task.name)
+ time.sleep(sleep_interval)
+ if len(stucked) > 0:
+ logger.info('Sleeping before subverting execution')
+ executor.shutdown(wait=False)
+ logger.info('Subverting execution. Stucked %s', stucked)
+ process_report.ts = 0
+ break
+ process_report.ts = int(time.time())
diff --git a/core/schains/process.py b/core/schains/process.py
new file mode 100644
index 000000000..1da149995
--- /dev/null
+++ b/core/schains/process.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE Admin
+#
+# Copyright (C) 2021 SKALE Labs
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import json
+import logging
+import os
+import shutil
+import signal
+from typing import Tuple
+
+import pathlib
+import psutil
+
+from tools.configs.schains import SCHAINS_DIR_PATH
+from tools.helper import check_pid
+
+
+logger = logging.getLogger(__name__)
+
+P_KILL_WAIT_TIMEOUT = 60
+
+
+def is_schain_process_report_exist(schain_name: str) -> bool:
+ path = pathlib.Path(SCHAINS_DIR_PATH).joinpath(schain_name, ProcessReport.REPORT_FILENAME)
+ return path.is_file()
+
+
+def get_schain_process_info(schain_name: str) -> Tuple[int | None, int | None]:
+ report = ProcessReport(schain_name)
+ if not ProcessReport(schain_name).is_exist():
+ return None, None
+ else:
+ return report.pid, report.ts
+
+
+class ProcessReport:
+ REPORT_FILENAME = 'process.json'
+
+ def __init__(self, name: str) -> None:
+ self.path = pathlib.Path(SCHAINS_DIR_PATH).joinpath(name, self.REPORT_FILENAME)
+ self.path.parent.mkdir(parents=True, exist_ok=True)
+
+ def is_exist(self) -> bool:
+ return os.path.isfile(self.path)
+
+ @property
+ def ts(self) -> int:
+ return self.read()['ts']
+
+ @ts.setter
+ def ts(self, value: int) -> None:
+ report = {}
+ if self.is_exist():
+ report = self.read()
+ report['ts'] = value
+ self._save_tmp(report)
+ self._move()
+
+ @property
+ def pid(self) -> int:
+ return self.read()['pid']
+
+ @pid.setter
+ def pid(self, value: int) -> None:
+ report = {}
+ if self.is_exist():
+ report = self.read()
+ report['pid'] = value
+ self._save_tmp(report)
+ self._move()
+
+ @property
+ def _tmp_path(self) -> pathlib.Path:
+ return self.path.with_stem('.tmp.' + self.path.stem)
+
+ def read(self) -> dict:
+ with open(self.path) as process_file:
+ data = json.load(process_file)
+ return data
+
+ def _save_tmp(self, report: dict) -> None:
+ with open(self._tmp_path, 'w') as tmp_file:
+ json.dump(report, tmp_file)
+
+ def _move(self) -> None:
+ if os.path.isfile(self._tmp_path):
+ shutil.move(self._tmp_path, self.path)
+
+ def update(self, pid: int, ts: int) -> None:
+ report = {'pid': pid, 'ts': ts}
+ self._save_tmp(report=report)
+ self._move()
+
+ def cleanup(self) -> None:
+ os.remove(self.path)
+
+
+def terminate_process(
+ pid: int,
+ kill_timeout: int = P_KILL_WAIT_TIMEOUT,
+ log_msg: str = ''
+) -> None:
+ log_prefix = f'pid: {pid} - '
+
+ if log_msg != '':
+ log_prefix += f'{log_msg} - '
+ if pid == 0:
+ logger.warning(f'{log_prefix} - pid is 0, skipping')
+ return
+ try:
+ logger.warning(f'{log_prefix} - going to terminate')
+ p = psutil.Process(pid)
+ os.kill(p.pid, signal.SIGTERM)
+ p.wait(timeout=kill_timeout)
+ logger.info(f'{log_prefix} was terminated')
+ except psutil.NoSuchProcess:
+ logger.info(f'{log_prefix} - no such process')
+ except psutil.TimeoutExpired:
+ logger.warning(f'{log_prefix} - timeout expired, going to kill')
+ p.kill()
+ logger.info(f'{log_prefix} - process was killed')
+ except Exception:
+ logger.exception(f'{log_prefix} - termination failed!')
+ return
+
+
+def is_monitor_process_alive(monitor_pid: int) -> bool:
+ """Checks that provided monitor_id is inited and alive"""
+ return monitor_pid != 0 and check_pid(monitor_pid)
diff --git a/core/schains/process_manager.py b/core/schains/process_manager.py
index fddaa6a4d..e14857a34 100644
--- a/core/schains/process_manager.py
+++ b/core/schains/process_manager.py
@@ -17,44 +17,30 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
-import sys
import logging
-from typing import Dict
+import time
from multiprocessing import Process
+from typing import Optional
-from skale import Skale
+from skale import Skale, SkaleIma
+from skale.contracts.manager.schains import SchainStructure
-from core.schains.monitor.main import run_monitor_for_schain
+from core.node_config import NodeConfig
+from core.schains.monitor.main import start_tasks
from core.schains.notifications import notify_if_not_enough_balance
-from core.schains.process_manager_helper import (
- terminate_stuck_schain_process, is_monitor_process_alive, terminate_process
+from core.schains.process import (
+ get_schain_process_info,
+ is_monitor_process_alive,
+ terminate_process
)
-from web.models.schain import upsert_schain_record, SChainRecord
from tools.str_formatters import arguments_list_string
-
+from tools.configs.schains import DKG_TIMEOUT_COEFFICIENT
logger = logging.getLogger(__name__)
-def pm_signal_handler(*args):
- """
- This function is trigerred when SIGTERM signal is received by the main process of the app.
- The purpose of the process manager signal handler is to forward SIGTERM signal to all sChain
- processes so they can gracefully save DKG results before
- """
- records = SChainRecord.select()
- print(f'schain_records: {len(records)}')
- print(f'schain_records: {records}')
- for r in records:
- logger.warning(f'Sending SIGTERM to {r.name}, {r.monitor_id}')
- terminate_process(r.monitor_id)
- logger.warning('All sChain processes stopped, exiting...')
- sys.exit(0)
-
-
-def run_process_manager(skale, skale_ima, node_config):
- # signal.signal(signal.SIGTERM, pm_signal_handler)
+def run_process_manager(skale: Skale, skale_ima: SkaleIma, node_config: NodeConfig) -> None:
logger.info('Process manager started')
node_id = node_config.id
node_info = node_config.all()
@@ -66,30 +52,36 @@ def run_process_manager(skale, skale_ima, node_config):
logger.info('Process manager procedure finished')
-def run_pm_schain(skale, skale_ima, node_config, schain: Dict) -> None:
- schain_record = upsert_schain_record(schain['name'])
- log_prefix = f'sChain {schain["name"]} -' # todo - move to logger formatter
-
- terminate_stuck_schain_process(skale, schain_record, schain)
- monitor_process_alive = is_monitor_process_alive(schain_record.monitor_id)
+def run_pm_schain(
+ skale: Skale,
+ skale_ima: SkaleIma,
+ node_config: NodeConfig,
+ schain: SchainStructure,
+ timeout: Optional[int] = None,
+) -> None:
+ log_prefix = f'sChain {schain.name} -'
- if not monitor_process_alive:
- logger.info(f'{log_prefix} PID {schain_record.monitor_id} is not running, spawning...')
+ if timeout is not None:
+ allowed_diff = timeout
+ else:
+ dkg_timeout = skale.constants_holder.get_dkg_timeout()
+ allowed_diff = int(dkg_timeout * DKG_TIMEOUT_COEFFICIENT)
+
+ pid, pts = get_schain_process_info(schain.name)
+ if pid is not None and is_monitor_process_alive(pid):
+ if int(time.time()) - pts > allowed_diff:
+ logger.info('%s Terminating process: PID = %d', log_prefix, pid)
+ terminate_process(pid)
+ else:
+ logger.info('%s Process is running: PID = %d', log_prefix, pid)
+ else:
process = Process(
- name=schain['name'],
- target=run_monitor_for_schain,
- args=(
- skale,
- skale_ima,
- node_config,
- schain
- )
+ name=schain.name,
+ target=start_tasks,
+ args=(skale, schain, node_config, skale_ima)
)
process.start()
- schain_record.set_monitor_id(process.ident)
- logger.info(f'{log_prefix} Process started: PID = {process.ident}')
- else:
- logger.info(f'{log_prefix} Process is running: PID = {schain_record.monitor_id}')
+ logger.info('%s Process started: PID = %d', log_prefix, process.ident)
def fetch_schains_to_monitor(skale: Skale, node_id: int) -> list:
@@ -100,12 +92,19 @@ def fetch_schains_to_monitor(skale: Skale, node_id: int) -> list:
schains = skale.schains.get_schains_for_node(node_id)
leaving_schains = get_leaving_schains_for_node(skale, node_id)
schains.extend(leaving_schains)
- active_schains = list(filter(lambda schain: schain['active'], schains))
+ active_schains = list(filter(lambda schain: schain.active, schains))
schains_holes = len(schains) - len(active_schains)
logger.info(
- arguments_list_string({'Node ID': node_id, 'sChains on node': active_schains,
- 'Number of sChains on node': len(active_schains),
- 'Empty sChain structs': schains_holes}, 'Monitoring sChains'))
+ arguments_list_string(
+ {
+ 'Node ID': node_id,
+ 'sChains on node': active_schains,
+ 'Number of sChains on node': len(active_schains),
+ 'Empty sChain structs': schains_holes,
+ },
+ 'Monitoring sChains',
+ )
+ )
return active_schains
@@ -115,8 +114,8 @@ def get_leaving_schains_for_node(skale: Skale, node_id: int) -> list:
leaving_history = skale.node_rotation.get_leaving_history(node_id)
for leaving_schain in leaving_history:
schain = skale.schains.get(leaving_schain['schain_id'])
- if skale.node_rotation.is_rotation_active(schain['name']) and schain['name']:
- schain['active'] = True
+ if skale.node_rotation.is_rotation_active(schain.name) and schain.name:
+ schain.active = True
leaving_schains.append(schain)
logger.info(f'Got leaving sChains for the node: {leaving_schains}')
return leaving_schains
diff --git a/core/schains/process_manager_helper.py b/core/schains/process_manager_helper.py
deleted file mode 100644
index 2128c7ba1..000000000
--- a/core/schains/process_manager_helper.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# This file is part of SKALE Admin
-#
-# Copyright (C) 2021 SKALE Labs
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see .
-
-import logging
-import os
-import signal
-from datetime import datetime
-
-import psutil
-
-
-from tools.helper import check_pid
-
-
-logger = logging.getLogger(__name__)
-
-TIMEOUT_COEFFICIENT = 2.2
-P_KILL_WAIT_TIMEOUT = 60
-
-
-def terminate_stuck_schain_process(skale, schain_record, schain):
- """
- This function terminates the process if last_seen time is less than
- DKG timeout * TIMEOUT_COEFFICIENT
- """
- allowed_last_seen_time = _calc_allowed_last_seen_time(skale)
- if not schain_record.monitor_last_seen:
- logging.warning(f'schain: {schain["name"]}, monitor_last_seen is None, skipping...')
- return
- schain_monitor_last_seen = schain_record.monitor_last_seen.timestamp()
- if allowed_last_seen_time > schain_monitor_last_seen:
- logger.warning(f'schain: {schain["name"]}, pid {schain_record.monitor_id} last seen is \
-{schain_monitor_last_seen}, while max allowed last_seen is {allowed_last_seen_time}, pid \
-{schain_record.monitor_id} will be terminated now!')
- terminate_schain_process(schain_record)
-
-
-def terminate_schain_process(schain_record):
- log_msg = f'schain: {schain_record.name}'
- terminate_process(schain_record.monitor_id, log_msg=log_msg)
-
-
-def terminate_process(pid, kill_timeout=P_KILL_WAIT_TIMEOUT, log_msg=''):
- log_prefix = f'pid: {pid} - '
- if log_msg != '':
- log_prefix += f'{log_msg} - '
- if pid == 0:
- logger.warning(f'{log_prefix} - pid is 0, skipping')
- return
- try:
- logger.warning(f'{log_prefix} - going to terminate')
- p = psutil.Process(pid)
- os.kill(p.pid, signal.SIGTERM)
- p.wait(timeout=kill_timeout)
- logger.info(f'{log_prefix} was terminated')
- except psutil.NoSuchProcess:
- logger.info(f'{log_prefix} - no such process')
- except psutil.TimeoutExpired:
- logger.warning(f'{log_prefix} - timout expired, going to kill')
- p.kill()
- logger.info(f'{log_prefix} - process was killed')
- except Exception:
- logging.exception(f'{log_prefix} - termination failed!')
-
-
-def is_monitor_process_alive(monitor_id):
- """Checks that provided monitor_id is inited and alive"""
- return monitor_id != 0 and check_pid(monitor_id)
-
-
-def _calc_allowed_last_seen_time(skale):
- dkg_timeout = skale.constants_holder.get_dkg_timeout()
- allowed_diff = int(dkg_timeout * TIMEOUT_COEFFICIENT)
- logger.info(f'dkg_timeout: {dkg_timeout}, TIMEOUT_COEFFICIENT: {TIMEOUT_COEFFICIENT}, \
-allowed_diff: {allowed_diff}')
- return datetime.now().timestamp() - allowed_diff
diff --git a/core/schains/runner.py b/core/schains/runner.py
index e65aa6394..50b5c3867 100644
--- a/core/schains/runner.py
+++ b/core/schains/runner.py
@@ -19,8 +19,10 @@
import copy
import logging
+from typing import Optional
from docker.types import LogConfig, Ulimit
+from skale.contracts.manager.schains import SchainStructure
from core.schains.volume import get_schain_volume_config
from core.schains.limits import get_schain_limit, get_ima_limit, get_schain_type
@@ -161,13 +163,12 @@ def run_container(
def restart_container(
type,
- schain,
+ schain: SchainStructure,
timeout=SCHAIN_STOP_TIMEOUT,
dutils=None
):
dutils = dutils or DockerUtils()
- schain_name = schain['name']
- container_name = get_container_name(type, schain_name)
+ container_name = get_container_name(type, schain.name)
logger.info(arguments_list_string({'Container name': container_name},
'Restarting container...'))
@@ -176,19 +177,19 @@ def restart_container(
def run_schain_container(
- schain,
+ schain: SchainStructure,
download_snapshot=False,
start_ts=None,
dutils=None,
volume_mode=None,
ulimit_check=True,
enable_ssl=True,
- snapshot_from: str = '',
+ snapshot_from: Optional[str] = None,
sync_node=False,
historic_state=False
):
- schain_name = schain['name']
- schain_type = get_schain_type(schain['partOfNode'])
+ schain_name = schain.name
+ schain_type = get_schain_type(schain.part_of_node)
cpu_limit = None if sync_node else get_schain_limit(schain_type, MetricType.cpu_shares)
mem_limit = None if sync_node else get_schain_limit(schain_type, MetricType.mem)
@@ -224,22 +225,22 @@ def run_schain_container(
def run_ima_container(
- schain: dict,
+ schain: SchainStructure,
mainnet_chain_id: int,
time_frame: int,
image: str,
dutils: DockerUtils = None
) -> None:
dutils = dutils or DockerUtils()
- env = get_ima_env(schain['name'], mainnet_chain_id, time_frame)
+ env = get_ima_env(schain.name, mainnet_chain_id, time_frame)
- schain_type = get_schain_type(schain['partOfNode'])
+ schain_type = get_schain_type(schain.part_of_node)
cpu_limit = get_ima_limit(schain_type, MetricType.cpu_shares)
mem_limit = get_ima_limit(schain_type, MetricType.mem)
run_container(
image_type=IMA_CONTAINER,
- schain_name=schain['name'],
+ schain_name=schain.name,
env=env.to_dict(),
cpu_shares_limit=cpu_limit,
mem_limit=mem_limit,
diff --git a/core/schains/skaled_status.py b/core/schains/status.py
similarity index 65%
rename from core/schains/skaled_status.py
rename to core/schains/status.py
index 02186a4a9..2b40b2271 100644
--- a/core/schains/skaled_status.py
+++ b/core/schains/status.py
@@ -22,22 +22,52 @@
import logging
from json.decoder import JSONDecodeError
from typing import Optional
+from abc import ABCMeta, abstractmethod
-from core.schains.config.directory import skaled_status_filepath
+from core.schains.config.directory import node_cli_status_filepath, skaled_status_filepath
from tools.config_utils import config_getter, log_broken_status_file
from tools.helper import read_json
logger = logging.getLogger(__name__)
-class SkaledStatus:
- def __init__(self, filepath: str):
+class IStatus(metaclass=ABCMeta):
+ @abstractmethod
+ def __init__(self, filepath: str) -> None:
+ pass
+
+ @property
+ @abstractmethod
+ def filepath(self) -> str:
+ pass
+
+ @property
+ def all(self) -> Optional[dict]:
+ if not os.path.isfile(self.filepath):
+ logger.warning("File %s is not found", self.filepath)
+ return
+ try:
+ return read_json(self.filepath)
+ except JSONDecodeError:
+ log_broken_status_file(self.filepath)
+ return {}
+
+ def log(self) -> None:
+ logger.info(f'{self.__class__.__name__}: \n' + json.dumps(self.all, indent=4))
+
+
+class SkaledStatus(IStatus):
+ def __init__(self, filepath: str) -> None:
"""
Read-only wrapper for skaled.status file, reads from the file each time.
Returns dict for top-level keys, True or False for second-level keys.
Returns None for all keys if file is not found.
"""
- self.filepath = filepath
+ self._filepath = filepath
+
+ @property
+ def filepath(self) -> str:
+ return self._filepath
@property
@config_getter
@@ -84,28 +114,48 @@ def start_from_snapshot(self) -> bool:
return
return exit_state['StartFromSnapshot']
+
+class NodeCliStatus(IStatus):
+ def __init__(self, filepath: str) -> None:
+ """
+ Read-only wrapper for node_cli.status file, reads from the file each time.
+ """
+ self._filepath = filepath
+
@property
- def all(self) -> dict:
- if not os.path.isfile(self.filepath):
- logger.warning("File %s is not found", self.filepath)
- return
- try:
- return read_json(self.filepath)
- except JSONDecodeError:
- log_broken_status_file(self.filepath)
- return {}
+ @config_getter
+ def repair_ts(self) -> int:
+ return 'repair_ts', self.filepath
- def log(self) -> None:
- logger.info('skaled status file: \n' + json.dumps(self.all, indent=4))
+ @property
+ @config_getter
+ def snapshot_from(self) -> str:
+ return 'snapshot_from', self.filepath
+
+ @property
+ def filepath(self) -> str:
+ return self._filepath
-def init_skaled_status(schain_name) -> SkaledStatus:
+def init_skaled_status(schain_name: str) -> SkaledStatus:
status_filepath = skaled_status_filepath(schain_name)
return SkaledStatus(status_filepath)
-def get_skaled_status(schain_name) -> Optional[SkaledStatus]:
+def get_skaled_status(schain_name: str) -> Optional[SkaledStatus]:
status_path = skaled_status_filepath(schain_name)
if os.path.isfile(status_path):
return SkaledStatus(status_path)
return None
+
+
+def init_node_cli_status(schain_name: str) -> NodeCliStatus:
+ status_filepath = node_cli_status_filepath(schain_name)
+ return NodeCliStatus(status_filepath)
+
+
+def get_node_cli_status(schain_name: str) -> Optional[NodeCliStatus]:
+ status_path = node_cli_status_filepath(schain_name)
+ if os.path.isfile(status_path):
+ return NodeCliStatus(status_path)
+ return None
diff --git a/core/schains/task.py b/core/schains/task.py
deleted file mode 100644
index b95a8eb92..000000000
--- a/core/schains/task.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import logging
-import time
-from concurrent.futures import Future, ThreadPoolExecutor
-from typing import Callable, List, Optional
-
-logger = logging.getLogger(__name__)
-
-
-class Task:
- def __init__(
- self,
- name: str,
- action: Callable,
- index: int = 0,
- sleep: int = 2
- ) -> None:
- self.name = name
- self.index = index
- self.action = action
- self.sleep = sleep
-
- def run(self) -> None:
- try:
- self.action()
- except Exception as e:
- logger.exception('Task %s failed with %s', self.name, e)
- logger.info('Sleeping after task execution for %d', self.sleep)
- time.sleep(self.sleep)
-
-
-def keep_tasks_running(
- executor: ThreadPoolExecutor,
- tasks: List[Task],
- futures: List[Optional[Future]]
-) -> None:
- for i, task in enumerate(tasks):
- future = futures[i]
- if future is not None and not future.running():
- result = future.result()
- logger.info('Task %s finished with %s', task.name, result)
- if future is None or not future.running():
- logger.info('Running task %s', task.name)
- futures[i] = executor.submit(task.run)
-
-
-def run_tasks(name: str, tasks: List[Task]) -> None:
- with ThreadPoolExecutor(max_workers=len(tasks), thread_name_prefix='T') as executor:
- futures: List[Optional[Future]] = [None for i in range(len(tasks))]
- while True:
- keep_tasks_running(executor, tasks, futures)
- time.sleep(30)
diff --git a/core/schains/volume.py b/core/schains/volume.py
index dbba93cd4..422a49bb0 100644
--- a/core/schains/volume.py
+++ b/core/schains/volume.py
@@ -21,6 +21,7 @@
import os
import shutil
+from skale.contracts.manager.schains import SchainStructure
from core.schains.limits import get_schain_limit, get_schain_type
from core.schains.types import MetricType
from tools.configs.schains import SCHAIN_STATE_PATH, SCHAIN_STATIC_PATH
@@ -45,24 +46,23 @@ def is_volume_exists(schain_name, sync_node=False, dutils=None):
def init_data_volume(
- schain: dict,
+ schain: SchainStructure,
sync_node: bool = False,
dutils: DockerUtils = None
):
dutils = dutils or DockerUtils()
- schain_name = schain['name']
- if is_volume_exists(schain_name, sync_node=sync_node, dutils=dutils):
- logger.debug(f'Volume already exists: {schain_name}')
+ if is_volume_exists(schain.name, sync_node=sync_node, dutils=dutils):
+ logger.debug(f'Volume already exists: {schain.name}')
return
- logger.info(f'Creating volume for schain: {schain_name}')
+ logger.info(f'Creating volume for schain: {schain.name}')
if sync_node:
- ensure_data_dir_path(schain['name'])
+ ensure_data_dir_path(schain.name)
else:
- schain_type = get_schain_type(schain['partOfNode'])
+ schain_type = get_schain_type(schain.part_of_node)
disk_limit = get_schain_limit(schain_type, MetricType.disk)
- dutils.create_data_volume(schain_name, disk_limit)
+ dutils.create_data_volume(schain.name, disk_limit)
def remove_data_dir(schain_name):
diff --git a/core/updates.py b/core/updates.py
index dba476e7b..9a93e4eb9 100644
--- a/core/updates.py
+++ b/core/updates.py
@@ -23,7 +23,9 @@
from core.node_config import NodeConfig
from core.ima.schain import update_predeployed_ima
-
+from core.schains.config.file_manager import ConfigFileManager
+from core.schains.cleaner import get_schains_on_node
+from tools.docker_utils import DockerUtils
logger = logging.getLogger(__name__)
@@ -56,3 +58,22 @@ def update_node_config_file(skale: Skale, node_config: NodeConfig) -> None:
node_config.ip = ip
if node_config.name != name:
node_config.name = name
+
+
+def update_unsafe_for_schains(
+ skale: Skale,
+ node_config: NodeConfig,
+ dutils: DockerUtils
+) -> list[str]:
+ schains_on_node = get_schains_on_node(dutils=dutils)
+ unsafe_chains = []
+ for schain_name in schains_on_node:
+ cfm = ConfigFileManager(schain_name=schain_name)
+ if skale.node_rotation.is_rotation_active(schain_name):
+ logger.info('Rotation is in progress for %s', schain_name)
+ unsafe_chains.append(schain_name)
+ # To handle the gap between SM finish ts and skaled exit time
+ elif cfm.skaled_config_exists() and not cfm.skaled_config_synced_with_upstream():
+ logger.info('Skaled config is not synced with upstream for %s', schain_name)
+ unsafe_chains.append(schain_name)
+ return unsafe_chains
diff --git a/gunicorn.conf.py b/gunicorn.conf.py
index e301f4843..51958e107 100644
--- a/gunicorn.conf.py
+++ b/gunicorn.conf.py
@@ -1,4 +1,4 @@
bind = "127.0.0.1:3007"
workers = 2
timeout = 1000
-loglevel = "debug"
+loglevel = "info"
diff --git a/requirements.txt b/requirements.txt
index 1bfa7b338..b8ff328cb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,7 @@ Jinja2==3.1.2
docker==6.1.3
python-iptables==1.0.1
-skale.py==6.3
+skale.py==6.4b0
requests==2.31
ima-predeployed==2.1.0b0
diff --git a/scripts/helper.sh b/scripts/helper.sh
index c58a84519..7d35bec01 100644
--- a/scripts/helper.sh
+++ b/scripts/helper.sh
@@ -32,11 +32,9 @@ export_test_env () {
tests_cleanup () {
export_test_env
- docker rm -f skale_schain_test && docker volume rm test || true
- sudo rm -r tests/skale-data/lib || true
+ rm -r tests/skale-data/lib || true
rm tests/skale-data/node_data/node_config.json || true
docker rm -f sgx-simulator || true
- docker rm -f skale_schain_test1 skale_schain_test2 skale_schain_test3 || true
find . -name \*.pyc -delete || true
mkdir -p $SGX_CERTIFICATES_FOLDER || true
rm -rf $SGX_CERTIFICATES_FOLDER/sgx.* || true
diff --git a/tests/conftest.py b/tests/conftest.py
index 8b34c172f..973a375e7 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -15,9 +15,7 @@
from skale import SkaleManager
from skale.wallets import Web3Wallet
from skale.utils.account_tools import generate_account, send_eth
-from skale.utils.contracts_provision.fake_multisig_contract import (
- deploy_fake_multisig_contract
-)
+from skale.utils.contracts_provision.fake_multisig_contract import deploy_fake_multisig_contract
from skale.utils.contracts_provision.main import (
add_test_permissions,
add_test2_schain_type,
@@ -27,7 +25,7 @@
create_nodes,
create_schain,
link_nodes_to_validator,
- setup_validator
+ setup_validator,
)
from skale.utils.web3_utils import init_web3
@@ -38,16 +36,27 @@
from core.schains.config.helper import (
get_base_port_from_config,
get_node_ips_from_config,
- get_own_ip_from_config
+ get_own_ip_from_config,
)
from core.schains.config.directory import schain_config_dir, skaled_status_filepath
from core.schains.cleaner import remove_schain_container, remove_schain_volume
from core.schains.ima import ImaData
from core.schains.external_config import ExternalConfig, ExternalState
-from core.schains.skaled_status import init_skaled_status, SkaledStatus
+from core.schains.status import (
+ init_node_cli_status,
+ init_skaled_status,
+ node_cli_status_filepath,
+ SkaledStatus,
+)
from core.schains.config.skale_manager_opts import SkaleManagerOpts
-from tools.configs import CONFIG_FOLDER, ENV_TYPE, META_FILEPATH, SSL_CERTIFICATES_FILEPATH
+from tools.configs import (
+ CONFIG_FOLDER,
+ ENV_TYPE,
+ META_FILEPATH,
+ SSL_CERTIFICATES_FILEPATH,
+ STATIC_GROUPS_FOLDER
+)
from tools.configs.containers import CONTAINERS_FILEPATH
from tools.configs.ima import SCHAIN_IMA_ABI_FILEPATH
from tools.configs.schains import SCHAINS_DIR_PATH
@@ -60,16 +69,18 @@
from tests.utils import (
ALLOWED_RANGES,
CONFIG_STREAM,
+ CURRENT_TS,
ENDPOINT,
ETH_AMOUNT_PER_NODE,
ETH_PRIVATE_KEY,
+ STATIC_NODE_GROUPS,
generate_cert,
generate_schain_config,
get_test_rule_controller,
IMA_MIGRATION_TS,
init_skale_from_wallet,
init_skale_ima,
- upsert_schain_record_with_config
+ upsert_schain_record_with_config,
)
NUMBER_OF_NODES = 2
@@ -81,14 +92,8 @@ def images():
cinfo = {}
with open(CONTAINERS_FILEPATH, 'r') as cf:
json.load(cinfo, cf)
- schain_image = '{}/{}'.format(
- cinfo['schain']['name'],
- cinfo['schain']['version']
- )
- ima_image = '{}/{}'.format(
- cinfo['ima']['name'],
- cinfo['ima']['version']
- )
+ schain_image = '{}/{}'.format(cinfo['schain']['name'], cinfo['schain']['version'])
+ ima_image = '{}/{}'.format(cinfo['ima']['name'], cinfo['ima']['version'])
dclient.images.pull(schain_image)
dclient.images.pull(ima_image)
@@ -104,14 +109,14 @@ def predeployed_ima():
@pytest.fixture(scope='session')
def web3():
- """ Returns a SKALE Manager instance with provider from config """
+ """Returns a SKALE Manager instance with provider from config"""
w3 = init_web3(ENDPOINT)
return w3
@pytest.fixture(scope='session')
def skale(web3):
- """ Returns a SKALE Manager instance with provider from config """
+ """Returns a SKALE Manager instance with provider from config"""
wallet = Web3Wallet(ETH_PRIVATE_KEY, web3)
skale_obj = init_skale_from_wallet(wallet)
add_test_permissions(skale_obj)
@@ -139,7 +144,7 @@ def node_wallets(skale):
web3=skale.web3,
wallet=skale.wallet,
receiver_address=wallet.address,
- amount=ETH_AMOUNT_PER_NODE
+ amount=ETH_AMOUNT_PER_NODE,
)
wallets.append(wallet)
return wallets
@@ -147,10 +152,7 @@ def node_wallets(skale):
@pytest.fixture
def node_skales(skale, node_wallets):
- return [
- SkaleManager(ENDPOINT, ABI_FILEPATH, wallet)
- for wallet in node_wallets
- ]
+ return [SkaleManager(ENDPOINT, ABI_FILEPATH, wallet) for wallet in node_wallets]
@pytest.fixture
@@ -171,10 +173,7 @@ def skale_ima():
@pytest.fixture
def ssl_folder():
- pathlib.Path(SSL_CERTIFICATES_FILEPATH).mkdir(
- parents=True,
- exist_ok=True
- )
+ pathlib.Path(SSL_CERTIFICATES_FILEPATH).mkdir(parents=True, exist_ok=True)
try:
yield SSL_CERTIFICATES_FILEPATH
finally:
@@ -203,63 +202,63 @@ def get_skaled_status_dict(
exit_time_reached=False,
clear_data_dir=False,
start_from_snapshot=False,
- start_again=False
+ start_again=False,
):
return {
- "subsystemRunning": {
- "SnapshotDownloader": snapshot_downloader,
- "Blockchain": False,
- "Rpc": False
+ 'subsystemRunning': {
+ 'SnapshotDownloader': snapshot_downloader,
+ 'Blockchain': False,
+ 'Rpc': False,
+ },
+ 'exitState': {
+ 'ClearDataDir': clear_data_dir,
+ 'StartAgain': start_again,
+ 'StartFromSnapshot': start_from_snapshot,
+ 'ExitTimeReached': exit_time_reached,
},
- "exitState": {
- "ClearDataDir": clear_data_dir,
- "StartAgain": start_again,
- "StartFromSnapshot": start_from_snapshot,
- "ExitTimeReached": exit_time_reached
- }
}
SECRET_KEY = {
- "common_public_key": [
+ 'common_public_key': [
11111111111111111111111111111111111111111111111111111111111111111111111111111,
1111111111111111111111111111111111111111111111111111111111111111111111111111,
1111111111111111111111111111111111111111111111111111111111111111111111111111,
- 11111111111111111111111111111111111111111111111111111111111111111111111111111
+ 11111111111111111111111111111111111111111111111111111111111111111111111111111,
],
- "public_key": [
- "1111111111111111111111111111111111111111111111111111111111111111111111111111",
- "1111111111111111111111111111111111111111111111111111111111111111111111111111",
- "1111111111111111111111111111111111111111111111111111111111111111111111111111",
- "11111111111111111111111111111111111111111111111111111111111111111111111111111"
+ 'public_key': [
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111',
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111',
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111',
+ '11111111111111111111111111111111111111111111111111111111111111111111111111111',
],
- "bls_public_keys": [
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111", # noqa
- "1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111" # noqa
+ 'bls_public_keys': [
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
+ '1111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111:11111111111111111111111111111111111111111111111111111111111111111111111111111', # noqa
],
- "t": 11,
- "n": 16,
- "key_share_name": "BLS_KEY:SCHAIN_ID:33333333333333333333333333333333333333333333333333333333333333333333333333333:NODE_ID:0:DKG_ID:0" # noqa
+ 't': 11,
+ 'n': 16,
+ 'key_share_name': 'BLS_KEY:SCHAIN_ID:33333333333333333333333333333333333333333333333333333333333333333333333333333:NODE_ID:0:DKG_ID:0', # noqa
}
@pytest.fixture
def _schain_name():
- """ Generates default schain name """
+ """Generates default schain name"""
return get_random_string()
@@ -295,8 +294,7 @@ def secret_keys(_schain_name):
@pytest.fixture
def schain_config(_schain_name, secret_key, predeployed_ima):
schain_dir_path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
- config_path = os.path.join(schain_dir_path,
- f'schain_{_schain_name}.json')
+ config_path = os.path.join(schain_dir_path, f'schain_{_schain_name}.json')
try:
pathlib.Path(schain_dir_path).mkdir(parents=True, exist_ok=True)
schain_config = generate_schain_config(_schain_name)
@@ -349,8 +347,7 @@ def skaled_status_exit_time_reached(_schain_name):
@pytest.fixture
def skaled_status_repair(_schain_name):
- generate_schain_skaled_status_file(
- _schain_name, clear_data_dir=True, start_from_snapshot=True)
+ generate_schain_skaled_status_file(_schain_name, clear_data_dir=True, start_from_snapshot=True)
try:
yield init_skaled_status(_schain_name)
finally:
@@ -371,7 +368,7 @@ def skaled_status_broken_file(_schain_name):
schain_dir_path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
pathlib.Path(schain_dir_path).mkdir(parents=True, exist_ok=True)
status_filepath = skaled_status_filepath(_schain_name)
- with open(status_filepath, "w") as text_file:
+ with open(status_filepath, 'w') as text_file:
text_file.write('abcd')
try:
yield SkaledStatus(status_filepath)
@@ -390,18 +387,14 @@ def db():
@pytest.fixture
def schain_db(db, _schain_name, meta_file):
- """ Database with default schain inserted """
+ """Database with default schain inserted"""
upsert_schain_record_with_config(_schain_name)
return _schain_name
@pytest.fixture
def meta_file():
- meta_info = {
- "version": "0.0.0",
- "config_stream": CONFIG_STREAM,
- "docker_lvmpy_stream": "1.1.1"
- }
+ meta_info = {'version': '0.0.0', 'config_stream': CONFIG_STREAM, 'docker_lvmpy_stream': '1.1.1'}
with open(META_FILEPATH, 'w') as meta_file:
json.dump(meta_info, meta_file)
try:
@@ -416,7 +409,7 @@ def schain_on_contracts(skale, nodes, _schain_name):
yield create_schain(
skale,
schain_type=1, # test2 should have 1 index
- schain_name=_schain_name
+ schain_name=_schain_name,
)
finally:
cleanup_nodes_schains(skale)
@@ -424,25 +417,14 @@ def schain_on_contracts(skale, nodes, _schain_name):
@pytest.fixture
def dutils():
- return DockerUtils(
- volume_driver='local',
- host='unix://var/run/docker.sock'
- )
+ return DockerUtils(volume_driver='local', host='unix://var/run/docker.sock')
@pytest.fixture
def skaled_mock_image(scope='module'):
- dutils = DockerUtils(
- volume_driver='local',
- host='unix://var/run/docker.sock'
- )
+ dutils = DockerUtils(volume_driver='local', host='unix://var/run/docker.sock')
name = 'skaled-mock'
- dutils.client.images.build(
- tag=name,
- rm=True,
- nocache=True,
- path='tests/skaled-mock'
- )
+ dutils.client.images.build(tag=name, rm=True, nocache=True, path='tests/skaled-mock')
yield name
dutils.client.images.remove(name, force=True)
@@ -515,18 +497,14 @@ def schain_checks(schain_config, schain_db, current_nodes, rule_controller, esta
current_nodes=current_nodes,
last_dkg_successful=True,
estate=estate,
- dutils=dutils
+ dutils=dutils,
)
@pytest.fixture
def schain_struct(schain_config):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
- return {
- 'name': schain_name,
- 'partOfNode': 0,
- 'generation': 0
- }
+ return {'name': schain_name, 'partOfNode': 0, 'generation': 0}
@pytest.fixture
@@ -540,10 +518,7 @@ def rule_controller(_schain_name, schain_db, schain_config):
own_ip = get_own_ip_from_config(schain_config)
node_ips = get_node_ips_from_config(schain_config)
return get_test_rule_controller(
- name=_schain_name,
- base_port=base_port,
- own_ip=own_ip,
- node_ips=node_ips
+ name=_schain_name, base_port=base_port, own_ip=own_ip, node_ips=node_ips
)
@@ -560,10 +535,7 @@ def uninited_rule_controller(_schain_name):
@pytest.fixture
def skale_manager_opts():
- return SkaleManagerOpts(
- schains_internal_address='0x1656',
- nodes_address='0x7742'
- )
+ return SkaleManagerOpts(schains_internal_address='0x1656', nodes_address='0x7742')
@pytest.fixture
@@ -580,11 +552,7 @@ def new_upstream(schain_db):
@pytest.fixture
def estate(skale):
- return ExternalState(
- ima_linked=True,
- chain_id=skale.web3.eth.chain_id,
- ranges=ALLOWED_RANGES
- )
+ return ExternalState(ima_linked=True, chain_id=skale.web3.eth.chain_id, ranges=ALLOWED_RANGES)
@pytest.fixture
@@ -610,7 +578,7 @@ def upstreams(schain_db, schain_config):
f'schain_{name}_9_1687183335.json',
f'schain_{name}_11_1687183336.json',
f'schain_{name}_11_1687183337.json',
- f'schain_{name}_11_1687183339.json'
+ f'schain_{name}_11_1687183339.json',
]
try:
for fname in files:
@@ -632,3 +600,34 @@ def ima_migration_schedule(schain_db):
yield migration_schedule_path
finally:
os.remove(migration_schedule_path)
+
+
+@pytest.fixture
+def static_groups_for_schain(_schain_name):
+ parent_folder = os.path.join(STATIC_GROUPS_FOLDER, ENV_TYPE)
+ os.makedirs(parent_folder)
+ static_groups_env_path = os.path.join(
+ parent_folder,
+ os.path.join(f'schain-{_schain_name}.json')
+ )
+ try:
+ write_json(static_groups_env_path, STATIC_NODE_GROUPS)
+ yield STATIC_NODE_GROUPS
+ finally:
+ shutil.rmtree(STATIC_GROUPS_FOLDER, ignore_errors=True)
+
+
+NCLI_STATUS_DICT = {'repair_ts': CURRENT_TS, 'snapshot_from': '127.0.0.1'}
+
+
+@pytest.fixture
+def ncli_status(_schain_name):
+ schain_dir_path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
+ pathlib.Path(schain_dir_path).mkdir(parents=True, exist_ok=True)
+ ncli_status_path = node_cli_status_filepath(_schain_name)
+ write_json(ncli_status_path, NCLI_STATUS_DICT)
+
+ try:
+ yield init_node_cli_status(_schain_name)
+ finally:
+ shutil.rmtree(schain_dir_path, ignore_errors=True)
diff --git a/tests/db_test.py b/tests/db_test.py
index 40ede0ca5..9c0f7417d 100644
--- a/tests/db_test.py
+++ b/tests/db_test.py
@@ -7,8 +7,6 @@
get_schains_statuses,
mark_schain_deleted,
set_schains_first_run,
- switch_off_repair_mode,
- toggle_schain_repair_mode,
SChainRecord,
upsert_schain_record
)
@@ -67,44 +65,6 @@ def test_schains_first_run(db, upsert_db):
SChainRecord.first_run == True).count() == RECORDS_NUMBER # noqa: E712
-def test_toggle_repair_mode(db, upsert_db):
- result = toggle_schain_repair_mode('schain-0')
- assert result
- assert SChainRecord.select().where(
- SChainRecord.repair_mode == True).count() == 1 # noqa: E712
- cursor = SChainRecord.select().where(
- SChainRecord.repair_mode == True).execute() # noqa: E712
- records = list(cursor)
- assert len(records) == 1
- assert records[0].name == 'schain-0'
- assert records[0].snapshot_from == ''
-
- result = toggle_schain_repair_mode('schain-0', '1.1.1.1')
- cursor = SChainRecord.select().where(
- SChainRecord.repair_mode == True).execute() # noqa: E712
- records = list(cursor)
- assert len(records) == 1
- assert records[0].name == 'schain-0'
- assert records[0].snapshot_from == '1.1.1.1'
-
- switch_off_repair_mode('schain-0')
- assert SChainRecord.select().where(
- SChainRecord.repair_mode == True).count() == 0 # noqa: E712
- cursor = SChainRecord.select().where(
- SChainRecord.name == 'schain-0').execute() # noqa: E712
- records = list(cursor)
- assert records[0].name == 'schain-0'
- assert not records[0].repair_mode
- assert records[0].snapshot_from == ''
-
-
-def test_toggle_repair_mode_schain_not_exists(db, upsert_db):
- result = toggle_schain_repair_mode('undefined-schain')
- assert not result
- assert SChainRecord.select().where(
- SChainRecord.repair_mode == True).count() == 0 # noqa: E712
-
-
def test_get_schains_names(db, upsert_db):
mark_schain_deleted('schain-0')
result = get_schains_names()
diff --git a/tests/docker_utils_test.py b/tests/docker_utils_test.py
index 731d7baa0..48320d796 100644
--- a/tests/docker_utils_test.py
+++ b/tests/docker_utils_test.py
@@ -14,7 +14,7 @@
get_container_info
)
from tests.utils import (
- get_schain_contracts_data,
+ get_schain_struct,
run_simple_schain_container,
run_simple_schain_container_in_sync_mode
)
@@ -104,7 +104,7 @@ def test_run_schain_container(
skaled_mock_image
):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
- schain_data = get_schain_contracts_data(schain_name)
+ schain_data = get_schain_struct(schain_name)
# Run schain container
run_simple_schain_container(schain_data, dutils)
@@ -123,7 +123,7 @@ def test_run_schain_container_sync(
cert_key_pair
):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
- schain_data = get_schain_contracts_data(schain_name)
+ schain_data = get_schain_struct(schain_name)
run_schain_container(
schain_data,
@@ -165,7 +165,7 @@ def test_run_schain_container_in_sync_mode(
skaled_mock_image
):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
- schain_data = get_schain_contracts_data(schain_name)
+ schain_data = get_schain_struct(schain_name)
# Run schain container
run_simple_schain_container_in_sync_mode(schain_data, dutils)
@@ -320,7 +320,7 @@ def test_get_container_image_name(
skaled_mock_image
):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
- schain_data = get_schain_contracts_data(schain_name)
+ schain_data = get_schain_struct(schain_name)
# Run schain container
run_simple_schain_container(schain_data, dutils)
diff --git a/tests/migrations_test.py b/tests/migrations_test.py
index c744e6250..4a084919f 100644
--- a/tests/migrations_test.py
+++ b/tests/migrations_test.py
@@ -16,7 +16,8 @@
add_monitor_id_field,
add_config_version_field,
add_restart_count_field,
- add_ssl_change_date_field
+ add_ssl_change_date_field,
+ add_repair_date_field
)
@@ -118,3 +119,9 @@ def test_add_ssl_change_date_field(upserted_db, migrator, model):
add_ssl_change_date_field(upserted_db, migrator)
for r in model.select().execute():
r.ssl_change_date < datetime.now()
+
+
+def test_add_repair_date_field(upserted_db, migrator, model):
+ add_repair_date_field(upserted_db, migrator)
+ for r in model.select().execute():
+ r.repair_date < datetime.now()
diff --git a/tests/monitoring_test.py b/tests/monitoring_test.py
index 79c1dab33..64d7e17dd 100644
--- a/tests/monitoring_test.py
+++ b/tests/monitoring_test.py
@@ -67,7 +67,7 @@ def test_update_telegraf_service(docker_node_config, telegraf_template, cleanup_
with open(TELEGRAF_CONFIG_PATH) as config:
config = config.read()
assert (
- config == '\n[agent]\n interval = "60s"\n hostname = "1.1.1.1"\n omit_hostname = false\n\n[global_tags]\n node_id = "1"\n\n[[outputs.db]]\n alias = "db"\n urls = ["http://127.0.0.1:1231"]\n') # noqa
+ config == '\n[agent]\n interval = "60s"\n hostname = "1.1.1.1"\n omit_hostname = false\n\n[global_tags]\n node_id = "1"\n\n[[outputs.db]]\n alias = "db"\n urls = ["http://127.0.0.1:1231"]\n') # noqa
assert dutils.is_container_running('skale_telegraf')
user_info = dutils.get_info('skale_telegraf')['stats']['Config']['User']
assert user_info == f'telegraf:{DOCKER_GROUP_ID}'
diff --git a/tests/routes/health_test.py b/tests/routes/health_test.py
index 0fe254ba2..cb36bb87b 100644
--- a/tests/routes/health_test.py
+++ b/tests/routes/health_test.py
@@ -14,7 +14,7 @@
from web.routes.health import health_bp
from web.helper import get_api_url
-from tests.utils import get_bp_data, run_custom_schain_container
+from tests.utils import get_bp_data, get_schain_struct, run_custom_schain_container
TEST_SGX_KEYNAME = 'test_keyname'
@@ -104,9 +104,9 @@ def __init__(self, *args, **kwargs):
def get_schains_for_node_mock(self, node_id):
return [
- {'name': schain_name},
- {'name': 'test-schain'},
- {'name': ''}
+ get_schain_struct(schain_name=schain_name),
+ get_schain_struct(schain_name='test-schain'),
+ get_schain_struct(schain_name=''),
]
with mock.patch('web.routes.health.SChainChecks', SChainChecksMock):
@@ -159,8 +159,8 @@ def test_sgx(skale_bp, skale):
assert data == {
'payload': {
'sgx_server_url': SGX_SERVER_URL,
- 'status': 0,
- 'status_name': 'CONNECTED',
+ 'status_zmq': True,
+ 'status_https': True,
'sgx_wallet_version': version,
'sgx_keyname': TEST_SGX_KEYNAME,
},
diff --git a/tests/routes/node_test.py b/tests/routes/node_test.py
index 61e351d6b..c0bc291a0 100644
--- a/tests/routes/node_test.py
+++ b/tests/routes/node_test.py
@@ -14,11 +14,13 @@
from core.node import Node, NodeStatus
from core.node_config import NodeConfig
-from tests.utils import get_bp_data, post_bp_data
+from core.schains.config.file_manager import ConfigFileManager
from tools.configs.tg import TG_API_KEY, TG_CHAT_ID
from web.routes.node import node_bp
from web.helper import get_api_url
+from tests.utils import get_bp_data, post_bp_data
+
CURRENT_TIMESTAMP = 1594903080
CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP)
@@ -27,7 +29,7 @@
@pytest.fixture
-def skale_bp(skale, dutils):
+def skale_bp(skale, node_config, dutils):
app = Flask(__name__)
app.register_blueprint(node_bp)
@@ -40,32 +42,14 @@ def handler(sender, **kwargs):
yield app.test_client()
-@pytest.fixture
-def node_contracts(skale):
- ip, public_ip, port, name = generate_random_node_data()
- skale.manager.create_node(ip, port, name,
- domain_name=DEFAULT_DOMAIN_NAME, wait_for=True)
- node_id = skale.nodes.node_name_to_index(name)
- yield node_id
- skale.nodes.init_exit(node_id)
- skale.manager.node_exit(node_id, wait_for=True)
-
-
-@pytest.fixture
-def node_config(node_contracts):
- config = NodeConfig()
- config.id = node_contracts
- return config
-
-
-def test_node_info(skale_bp, skale, node_config):
+def test_node_info(skale_bp, skale, node_config, node_wallets):
data = get_bp_data(skale_bp, get_api_url(BLUEPRINT_NAME, 'info'))
status = NodeStatus.ACTIVE.value
assert data['status'] == 'ok'
node_info = data['payload']['node_info']
assert node_info['id'] == node_config.id
assert node_info['status'] == status
- assert to_checksum_address(node_info['owner']) == skale.wallet.address
+ assert to_checksum_address(node_info['owner']) == node_wallets[0].address
def register_mock(self, ip, public_ip, port, name, domain_name, gas_limit=None,
@@ -272,3 +256,32 @@ def test_exit_maintenance(skale_bp, node_config_in_maintenance):
)
assert data['status'] == 'error'
data['payload'] == {}
+
+
+def test_update_safe(skale, schain_on_contracts, schain_config, upstreams, skale_bp):
+ data = get_bp_data(
+ skale_bp,
+ get_api_url(BLUEPRINT_NAME, 'update-safe'),
+ )
+ assert data['status'] == 'ok'
+ assert data['payload'] == {'update_safe': True, 'unsafe_chains': []}
+
+ with mock.patch('web.helper.init_skale', return_value=skale):
+ with mock.patch.object(skale.node_rotation, 'is_rotation_active', return_value=False):
+ skale.node_rotation.is_rotation_active = mock.Mock(return_value=True)
+ data = get_bp_data(
+ skale_bp,
+ get_api_url(BLUEPRINT_NAME, 'update-safe'),
+ )
+ assert data['payload'] == {'update_safe': False, 'unsafe_chains': [schain_on_contracts]}
+
+ cfm = ConfigFileManager(schain_on_contracts)
+
+ cfm.save_skaled_config({})
+
+ data = get_bp_data(
+ skale_bp,
+ get_api_url(BLUEPRINT_NAME, 'update-safe'),
+ )
+
+ assert data['payload'] == {'update_safe': False, 'unsafe_chains': [schain_on_contracts]}
diff --git a/tests/routes/schains_test.py b/tests/routes/schains_test.py
index 329987985..fdede93dd 100644
--- a/tests/routes/schains_test.py
+++ b/tests/routes/schains_test.py
@@ -10,7 +10,7 @@
from core.node_config import NodeConfig
from core.schains.config.file_manager import ConfigFileManager
-from tests.utils import get_bp_data, get_test_rule_controller, post_bp_data
+from tests.utils import get_bp_data, get_test_rule_controller
from web.models.schain import SChainRecord, upsert_schain_record
from web.routes.schains import schains_bp
from web.helper import get_api_url
@@ -97,39 +97,6 @@ def test_firewall_rules_route(skale_bp, schain_config):
}
-def test_enable_repair_mode(skale_bp, schain_db):
- schain_name = schain_db
- data = post_bp_data(skale_bp, get_api_url(BLUEPRINT_NAME, 'repair'),
- params={'schain_name': schain_name})
- assert data == {
- 'payload': {},
- 'status': 'ok'
- }
- r = upsert_schain_record(schain_name)
- assert r.repair_mode
- assert r.snapshot_from == ''
-
- data = post_bp_data(
- skale_bp,
- get_api_url(BLUEPRINT_NAME, 'repair'),
- params={'schain_name': schain_name, 'snapshot_from': '1.1.1.1'}
- )
- assert data == {
- 'payload': {},
- 'status': 'ok'
- }
- r = upsert_schain_record(schain_name)
- assert r.repair_mode
- assert r.snapshot_from == '1.1.1.1'
-
- data = post_bp_data(skale_bp, get_api_url(BLUEPRINT_NAME, 'repair'),
- params={'schain_name': 'undefined-schain'})
- assert data == {
- 'payload': 'No schain with name undefined-schain',
- 'status': 'error'
- }
-
-
def test_get_schain(
skale_bp,
skale,
@@ -172,8 +139,8 @@ def test_get_schain(
def test_schain_containers_versions(skale_bp):
- expected_skaled_version = '3.16.1'
- expected_ima_version = '2.0.0-beta.9'
+ expected_skaled_version = '3.19.0'
+ expected_ima_version = '2.1.0'
data = get_bp_data(skale_bp, get_api_url(
BLUEPRINT_NAME, 'container-versions'))
assert data == {
diff --git a/tests/schain_allocation.py b/tests/schain_allocation.py
new file mode 100644
index 000000000..caae6aacc
--- /dev/null
+++ b/tests/schain_allocation.py
@@ -0,0 +1,204 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of SKALE.py
+#
+# Copyright (C) 2021-Present SKALE Labs
+#
+# SKALE.py is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SKALE.py is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with SKALE.py. If not, see .
+
+import os
+import yaml
+import math
+
+
+def calc_disk_factor(divider, decimals=3):
+ factor = 10 ** decimals
+ disk_factor_raw = 1 - (1 / (divider + 1))
+ return math.floor(disk_factor_raw * factor) / factor
+
+
+LARGE_DIVIDER = 1
+MEDIUM_DIVIDER = 8
+TEST_DIVIDER = 8
+SMALL_DIVIDER = 128
+
+VOLUME_CHUNK = 512 * SMALL_DIVIDER
+DISK_FACTOR = calc_disk_factor(MEDIUM_DIVIDER)
+
+
+class Alloc:
+ def to_dict(self):
+ return self.values
+
+
+class ResourceAlloc(Alloc):
+ def __init__(self, value, fractional=False):
+ self.values = {
+ 'test4': value / TEST_DIVIDER,
+ 'test': value / TEST_DIVIDER,
+ 'small': value / SMALL_DIVIDER,
+ 'medium': value / MEDIUM_DIVIDER,
+ 'large': value / LARGE_DIVIDER
+ }
+ if not fractional:
+ for k in self.values:
+ self.values[k] = int(self.values[k])
+
+
+class DiskResourceAlloc(Alloc):
+ def __init__(self, value, fractional=False):
+ self.values = {
+ 'test4': value / TEST_DIVIDER,
+ 'test': value / TEST_DIVIDER,
+ 'small': value / SMALL_DIVIDER,
+ 'medium': value / MEDIUM_DIVIDER,
+ 'large': value / LARGE_DIVIDER
+ }
+ if not fractional:
+ for k in self.values:
+ self.values[k] = int(self.values[k])
+
+
+class SChainVolumeAlloc(Alloc):
+ def __init__(self, disk_alloc_dict: dict, proportions: dict):
+ self.values = {}
+ for size_name in disk_alloc_dict:
+ self.values[size_name] = {}
+ for allocation_type, distribution in proportions.items():
+ self.values[size_name][allocation_type] = {}
+ for key, value in distribution.items():
+ lim = int(value * disk_alloc_dict[size_name])
+ self.values[size_name][allocation_type].update({key: lim})
+
+
+class LevelDBAlloc(Alloc):
+ def __init__(self, disk_alloc_dict: dict, proportions: dict):
+ self.values = {}
+ for size_name in disk_alloc_dict:
+ self.values[size_name] = {}
+ for allocation_type, limits in disk_alloc_dict[size_name].items():
+ self.values[size_name][allocation_type] = {}
+ for key, value in proportions.items():
+ lim = int(value * limits['max_skaled_leveldb_storage_bytes']) # noqa
+ self.values[size_name][allocation_type][key] = lim
+
+
+def calculate_free_disk_space(disk_size: int) -> int:
+ return int(disk_size * DISK_FACTOR) // VOLUME_CHUNK * VOLUME_CHUNK
+
+
+def calculate_shared_space_size(disk_size: int, shared_space_coefficient: float) -> int:
+ return int(disk_size * (1 - DISK_FACTOR) * shared_space_coefficient) // VOLUME_CHUNK * VOLUME_CHUNK # noqa
+
+
+def safe_load_yaml(filepath):
+ with open(filepath, 'r') as stream:
+ try:
+ return yaml.safe_load(stream)
+ except yaml.YAMLError as exc:
+ print(exc)
+
+
+def save_yaml(filepath, data, comments=None):
+ with open(filepath, 'w') as outfile:
+ if comments:
+ outfile.write(comments)
+ yaml.dump(data, outfile, default_flow_style=False)
+
+
+def generate_disk_alloc(configs: dict,
+ env_type_name: str,
+ schain_allocation: dict) -> DiskResourceAlloc:
+ """Generates disk allocation for the provided env type"""
+ disk_size_bytes = configs['envs'][env_type_name]['server']['disk'] # noqa
+ free_disk_space = calculate_free_disk_space(disk_size_bytes)
+ disk_alloc = DiskResourceAlloc(free_disk_space)
+ schain_allocation[env_type_name]['disk'] = disk_alloc.to_dict()
+ return disk_alloc
+
+
+def generate_volume_alloc(configs: dict, env_type_name: str,
+ schain_allocation: dict,
+ disk_alloc: ResourceAlloc) -> SChainVolumeAlloc:
+ """Generates volume partitioning """
+ """for the provided env type and disk allocation"""
+ proportions = configs['common']['schain']['volume_limits']
+ volume_alloc = SChainVolumeAlloc(disk_alloc.to_dict(), proportions)
+ schain_allocation[env_type_name]['volume_limits'] = volume_alloc.to_dict()
+ return volume_alloc
+
+
+def generate_leveldb_alloc(configs: dict,
+ env_type_name: str, schain_allocation: dict,
+ volume_alloc: SChainVolumeAlloc) -> LevelDBAlloc:
+ """Generates LevelDB partitioning """
+ """for the provided env type and volume partitioning"""
+ leveldb_proportions = configs['common']['schain']['leveldb_limits']
+ leveldb_alloc = LevelDBAlloc(volume_alloc.to_dict(), leveldb_proportions)
+ schain_allocation[env_type_name]['leveldb_limits'] = leveldb_alloc.to_dict()
+ return leveldb_alloc
+
+
+def generate_shared_space_value(
+ configs: dict,
+ env_type_name: str,
+ schain_allocation: dict
+) -> int:
+ disk_size_bytes = configs['envs'][env_type_name]['server']['disk'] # noqa
+
+ shared_space_coefficient = configs['common']['schain']['shared_space_coefficient'] # noqa
+ shared_space_size_bytes = calculate_shared_space_size(disk_size_bytes, shared_space_coefficient)
+
+ schain_allocation[env_type_name]['shared_space'] = shared_space_size_bytes # noqa
+ return shared_space_size_bytes
+
+
+def generate_schain_allocation(skale_node_path: str) -> dict:
+ configs_filepath = os.path.join(skale_node_path, 'static_params.yaml')
+ configs = safe_load_yaml(configs_filepath)
+
+ schain_allocation = {}
+ for env_type_name in configs['envs']:
+ schain_allocation[env_type_name] = {}
+ disk_alloc = generate_disk_alloc(
+ configs, env_type_name, schain_allocation)
+ volume_alloc = generate_volume_alloc(
+ configs, env_type_name, schain_allocation, disk_alloc)
+ generate_leveldb_alloc(
+ configs, env_type_name, schain_allocation, volume_alloc)
+ generate_shared_space_value(
+ configs, env_type_name, schain_allocation)
+
+ return schain_allocation
+
+
+def save_allocation(allocation: dict, allocation_filepath: str) -> None:
+ save_yaml(
+ filepath=allocation_filepath,
+ data=allocation,
+ comments='# DO NOT MODIFY THIS FILE MANUALLY!\n# Use generate_schain_allocation.py script from helper-scripts repo.\n\n' # noqa
+ )
+
+
+def main():
+ skale_node_path = os.environ['SKALE_NODE_PATH']
+ allocation = generate_schain_allocation(skale_node_path)
+ print('Generated allocation')
+ allocation_filepath = os.path.join(skale_node_path, 'schain_allocation.yml')
+ save_allocation(allocation, allocation_filepath)
+ print(f'Results saved to {allocation_filepath}')
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tests/schain_allocation_test.py b/tests/schain_allocation_test.py
new file mode 100644
index 000000000..57c1cb816
--- /dev/null
+++ b/tests/schain_allocation_test.py
@@ -0,0 +1,122 @@
+import pytest
+from tools.configs import CONFIG_FOLDER
+from tests.schain_allocation import generate_schain_allocation
+
+EXPECTED_SCHAIN_ALLOCATION = [
+ (
+ 'mainnet',
+ 'medium',
+ 'default',
+ {
+ 'max_consensus_storage_bytes': 63269997772,
+ 'max_file_storage_bytes': 63269997772,
+ 'max_reserved_storage_bytes': 21089999257,
+ 'max_skaled_leveldb_storage_bytes': 63269997772,
+ },
+ ),
+ (
+ 'mainnet',
+ 'medium',
+ 'no_filestorage',
+ {
+ 'max_consensus_storage_bytes': 94904996659,
+ 'max_file_storage_bytes': 0,
+ 'max_reserved_storage_bytes': 21089999257,
+ 'max_skaled_leveldb_storage_bytes': 94904996659,
+ },
+ ),
+ (
+ 'mainnet',
+ 'medium',
+ 'max_contract_storage',
+ {
+ 'max_consensus_storage_bytes': 28471498997,
+ 'max_file_storage_bytes': 0,
+ 'max_reserved_storage_bytes': 21089999257,
+ 'max_skaled_leveldb_storage_bytes': 161338494320,
+ },
+ ),
+ (
+ 'mainnet',
+ 'medium',
+ 'max_consensus_db',
+ {
+ 'max_consensus_storage_bytes': 151847994654,
+ 'max_file_storage_bytes': 0,
+ 'max_reserved_storage_bytes': 21089999257,
+ 'max_skaled_leveldb_storage_bytes': 37961998663,
+ },
+ ),
+ (
+ 'mainnet',
+ 'medium',
+ 'max_filestorage',
+ {
+ 'max_consensus_storage_bytes': 28471498997,
+ 'max_file_storage_bytes': 132866995322,
+ 'max_reserved_storage_bytes': 21089999257,
+ 'max_skaled_leveldb_storage_bytes': 28471498997,
+ },
+ ),
+]
+
+
+EXPECTED_LEVELDB_ALLOCATION = [
+ (
+ 'mainnet',
+ 'medium',
+ 'default',
+ {'contract_storage': 37961998663, 'db_storage': 12653999554},
+ ),
+ (
+ 'mainnet',
+ 'medium',
+ 'no_filestorage',
+ {'contract_storage': 56942997995, 'db_storage': 18980999331},
+ ),
+ (
+ 'mainnet',
+ 'medium',
+ 'max_contract_storage',
+ {'contract_storage': 96803096592, 'db_storage': 32267698864},
+ ),
+ (
+ 'mainnet',
+ 'medium',
+ 'max_consensus_db',
+ {'contract_storage': 22777199197, 'db_storage': 7592399732},
+ ),
+ (
+ 'mainnet',
+ 'medium',
+ 'max_filestorage',
+ {'contract_storage': 17082899398, 'db_storage': 5694299799},
+ ),
+]
+
+
+@pytest.fixture(scope='module')
+def schain_allocation():
+ return generate_schain_allocation(CONFIG_FOLDER)
+
+
+@pytest.mark.parametrize(
+ 'network_type,size_name,allocation_type,expected', EXPECTED_SCHAIN_ALLOCATION
+)
+def test_schain_allocation(network_type, size_name, allocation_type, expected, schain_allocation):
+ volume_limits = schain_allocation[network_type]['volume_limits']
+ assert volume_limits[size_name][allocation_type] == expected
+
+
+@pytest.mark.parametrize(
+ 'network_type,size_name,allocation_type,expected', EXPECTED_LEVELDB_ALLOCATION
+)
+def test_leveldb_allocation(network_type, size_name, allocation_type, expected, schain_allocation):
+ leveldb_limits = schain_allocation[network_type]['leveldb_limits']
+ assert leveldb_limits[size_name][allocation_type] == expected
+
+
+def test_schain_allocation_testnet(schain_allocation):
+ allocation = schain_allocation
+ assert allocation['qanet']['volume_limits'] == allocation['testnet']['volume_limits']
+ assert allocation['qanet']['leveldb_limits'] == allocation['testnet']['leveldb_limits']
diff --git a/tests/schains/checks_test.py b/tests/schains/checks_test.py
index 2e86f4dca..4baa89dce 100644
--- a/tests/schains/checks_test.py
+++ b/tests/schains/checks_test.py
@@ -32,7 +32,7 @@
from tests.utils import (
CONFIG_STREAM,
generate_schain_config,
- get_schain_contracts_data,
+ get_schain_struct,
response_mock,
request_mock
)
@@ -207,7 +207,7 @@ def test_ima_container_check(schain_checks, cleanup_ima_containers, dutils):
ts = int(time.time())
mts = ts + 3600
name = schain_checks.name
- schain = get_schain_contracts_data(name)
+ schain = get_schain_struct(name)
image = get_image_name(image_type=IMA_CONTAINER)
# new_image = get_image_name(type=IMA_CONTAINER, new=True)
diff --git a/tests/schains/cleaner_test.py b/tests/schains/cleaner_test.py
index c9df7b224..d16b41fd3 100644
--- a/tests/schains/cleaner_test.py
+++ b/tests/schains/cleaner_test.py
@@ -28,7 +28,7 @@
SChainRecord, mark_schain_deleted, upsert_schain_record)
-from tests.utils import (get_schain_contracts_data,
+from tests.utils import (get_schain_struct,
run_simple_schain_container,
run_simple_ima_container)
@@ -124,7 +124,7 @@ def test_remove_schain_volume(dutils, schain_config):
def schain_container(schain_config, ssl_folder, dutils):
""" Creates and removes schain container """
schain_name = schain_config['skaleConfig']['sChain']['schainName']
- schain_data = get_schain_contracts_data(schain_name)
+ schain_data = get_schain_struct(schain_name)
try:
run_simple_schain_container(schain_data, dutils)
yield schain_name
@@ -147,7 +147,7 @@ def test_remove_schain_container(
cert_key_pair
):
schain_name = schain_config['skaleConfig']['sChain']['schainName']
- schain_data = get_schain_contracts_data(schain_name)
+ schain_data = get_schain_struct(schain_name)
run_simple_schain_container(schain_data, dutils)
container_name = SCHAIN_CONTAINER_NAME_TEMPLATE.format(schain_name)
assert is_container_running(dutils, container_name)
@@ -158,7 +158,7 @@ def test_remove_schain_container(
@pytest.mark.skip('Docker API GA issues need to be resolved')
def test_remove_ima_container(dutils, schain_container):
schain_name = schain_container
- schain_data = get_schain_contracts_data(schain_name)
+ schain_data = get_schain_struct(schain_name)
with mock.patch('core.schains.runner.get_ima_env', return_value=ImaEnv(
schain_dir='/'
)):
diff --git a/tests/schains/config/generator_test.py b/tests/schains/config/generator_test.py
index 570f766c4..e2281182c 100644
--- a/tests/schains/config/generator_test.py
+++ b/tests/schains/config/generator_test.py
@@ -3,90 +3,147 @@
from pathlib import Path
import pytest
+from skale.contracts.manager.schains import SchainStructure
+from skale.dataclasses.schain_options import AllocationType
from etherbase_predeployed import ETHERBASE_ADDRESS, ETHERBASE_IMPLEMENTATION_ADDRESS
from marionette_predeployed import MARIONETTE_ADDRESS, MARIONETTE_IMPLEMENTATION_ADDRESS
from filestorage_predeployed import FILESTORAGE_ADDRESS, FILESTORAGE_IMPLEMENTATION_ADDRESS
from config_controller_predeployed import (
CONFIG_CONTROLLER_ADDRESS,
- CONFIG_CONTROLLER_IMPLEMENTATION_ADDRESS
+ CONFIG_CONTROLLER_IMPLEMENTATION_ADDRESS,
)
from multisigwallet_predeployed import MULTISIGWALLET_ADDRESS
from ima_predeployed.generator import MESSAGE_PROXY_FOR_SCHAIN_ADDRESS
from core.schains.config.generator import (
- generate_schain_config_with_skale, generate_schain_config, get_schain_originator
+ generate_schain_config_with_skale,
+ generate_schain_config,
+ get_schain_originator,
)
from core.schains.config.helper import get_schain_id
from core.schains.config.predeployed import PROXY_ADMIN_PREDEPLOYED_ADDRESS
from tools.configs.schains import SCHAINS_DIR_PATH
from tools.node_options import NodeOptions
+from tests.utils import get_schain_struct, TEST_ORIGINATOR_ADDRESS, TEST_MAINNET_OWNER_ADDRESS
+
NODE_ID = 1
ECDSA_KEY_NAME = 'TEST:KEY:NAME'
-COMMON_BLS_PUBLIC_KEY = [123, 456, 789, 123],
+COMMON_BLS_PUBLIC_KEY = ([123, 456, 789, 123],)
SECRET_KEY = {
- "key_share_name": "BLS_KEY:SCHAIN_ID:1:NODE_ID:0:DKG_ID:0",
- "t": 3,
- "n": 4,
- "common_public_key": COMMON_BLS_PUBLIC_KEY,
- "public_key": [
- "123",
- "456",
- "789",
- "123"
- ],
- "bls_public_keys": [
- "347043388985314611088523723672849261459066865147342514766975146031592968981:16865625797537152485129819826310148884042040710059790347821575891945447848787:12298029821069512162285775240688220379514183764628345956323231135392667898379:8", # noqa
- "347043388985314611088523723672849261459066865147342514766975146031592968982:16865625797537152485129819826310148884042040710059790347821575891945447848788:12298029821069512162285775240688220379514183764628345956323231135392667898380:9" # noqa
+ 'key_share_name': 'BLS_KEY:SCHAIN_ID:1:NODE_ID:0:DKG_ID:0',
+ 't': 3,
+ 'n': 4,
+ 'common_public_key': COMMON_BLS_PUBLIC_KEY,
+ 'public_key': ['123', '456', '789', '123'],
+ 'bls_public_keys': [
+ '347043388985314611088523723672849261459066865147342514766975146031592968981:16865625797537152485129819826310148884042040710059790347821575891945447848787:12298029821069512162285775240688220379514183764628345956323231135392667898379:8', # noqa
+ '347043388985314611088523723672849261459066865147342514766975146031592968982:16865625797537152485129819826310148884042040710059790347821575891945447848788:12298029821069512162285775240688220379514183764628345956323231135392667898380:9', # noqa
],
}
-TEST_ORIGINATOR_ADDRESS = '0x0B5e3eBB74eE281A24DDa3B1A4e70692c15EAC34'
-TEST_MAINNET_OWNER_ADDRESS = '0x30E1C96277735B03E59B3098204fd04FD0e78a46'
+NODE_GROUPS = {
+ 2: {
+ "rotation": {
+ "leaving_node_id": 0,
+ "new_node_id": 5,
+ },
+ "nodes": {
+ "4": [
+ 4,
+ 31,
+ "0x5d"
+ ],
+ "5": [
+ 8,
+ 179,
+ "0xon"
+ ],
+ },
+ "finish_ts": 1681498775,
+ "bls_public_key": {
+ "blsPublicKey0": "9",
+ "blsPublicKey1": "1",
+ "blsPublicKey2": "3",
+ "blsPublicKey3": "2"
+ }
+ },
+ 1: {
+ "rotation": {
+ "leaving_node_id": 3,
+ "new_node_id": 4,
+ },
+ "nodes": {
+ "0": [
+ 0,
+ 159,
+ "0xgd"
+ ],
+ "4": [
+ 4,
+ 31,
+ "0x5d"
+ ],
+ },
+ "finish_ts": 1681390775,
+ "bls_public_key": {
+ "blsPublicKey0": "3",
+ "blsPublicKey1": "4",
+ "blsPublicKey2": "7",
+ "blsPublicKey3": "9"
+ }
+ },
+ 0: {
+ "rotation": {
+ "leaving_node_id": 2,
+ "new_node_id": 3,
+ },
+ "nodes": {
+ "0": [
+ 0,
+ 159,
+ "0xgd"
+ ],
+ "3": [
+ 7,
+ 61,
+ "0xbh"
+ ],
+ },
+ "finish_ts": None,
+ "bls_public_key": None
+ }
+}
TEST_NODE = {'id': 1, 'name': 'test', 'publicKey': '0x5556', 'port': 10000}
-SCHAIN_WITHOUT_ORIGINATOR = {
- 'name': 'test_schain',
- 'partOfNode': 0,
- 'generation': 1,
- 'mainnetOwner': TEST_MAINNET_OWNER_ADDRESS,
- 'originator': '0x0000000000000000000000000000000000000000',
- 'multitransactionMode': True
-}
+def get_schain_struct_no_originator() -> SchainStructure:
+ schain = get_schain_struct(schain_name='test_schain')
+ schain.originator = '0x0000000000000000000000000000000000000000'
+ return schain
-SCHAIN_WITH_ORIGINATOR = {
- 'name': 'test_schain',
- 'partOfNode': 0,
- 'generation': 1,
- 'mainnetOwner': TEST_MAINNET_OWNER_ADDRESS,
- 'originator': TEST_ORIGINATOR_ADDRESS,
- 'multitransactionMode': True
-}
-SCHAIN_WITH_STATIC_ACCOUNTS = {
- 'name': 'static_chain',
- 'partOfNode': 0,
- 'generation': 1,
- 'mainnetOwner': TEST_MAINNET_OWNER_ADDRESS,
- 'originator': TEST_ORIGINATOR_ADDRESS,
- 'multitransactionMode': True
-}
+def get_schain_struct_static_account() -> SchainStructure:
+ schain = get_schain_struct(schain_name='static_chain')
+ return schain
def get_schain_node_with_schains(schain_name: str) -> list:
- return [{
- 'name': 'test',
- 'ip': b'\x01\x02\x03\x04',
- 'publicIP': b'\x01\x02\x03\x04',
- 'publicKey': '0x0B5e3eBB74eE281A24DDa3B1A4e70692c15EAC34',
- 'port': 10000,
- 'id': 1,
- 'schains': [{'name': schain_name}]
- }]
+ schain = get_schain_struct(schain_name=schain_name)
+ return [
+ {
+ 'name': 'test',
+ 'ip': b'\x01\x02\x03\x04',
+ 'publicIP': b'\x01\x02\x03\x04',
+ 'publicKey': '0x0B5e3eBB74eE281A24DDa3B1A4e70692c15EAC34',
+ 'port': 10000,
+ 'id': 1,
+ 'schains': [schain],
+ }
+ ]
@pytest.fixture
@@ -141,13 +198,32 @@ def check_node_bls_keys(info, index):
def check_node_info(node_id, info):
keys = [
- 'nodeID', 'nodeName', 'basePort', 'httpRpcPort', 'httpsRpcPort',
- 'wsRpcPort', 'wssRpcPort', 'bindIP', 'logLevel', 'logLevelConfig',
- 'imaMessageProxySChain', 'imaMessageProxyMainNet',
- 'ecdsaKeyName', 'wallets', 'minCacheSize',
- 'maxCacheSize', 'collectionQueueSize', 'collectionDuration',
- 'transactionQueueSize', 'maxOpenLeveldbFiles', 'info-acceptors', 'imaMonitoringPort',
- 'skale-manager', 'syncNode', 'pg-threads', 'pg-threads-limit'
+ 'nodeID',
+ 'nodeName',
+ 'basePort',
+ 'httpRpcPort',
+ 'httpsRpcPort',
+ 'wsRpcPort',
+ 'wssRpcPort',
+ 'bindIP',
+ 'logLevel',
+ 'logLevelConfig',
+ 'imaMessageProxySChain',
+ 'imaMessageProxyMainNet',
+ 'ecdsaKeyName',
+ 'wallets',
+ 'minCacheSize',
+ 'maxCacheSize',
+ 'collectionQueueSize',
+ 'collectionDuration',
+ 'transactionQueueSize',
+ 'maxOpenLeveldbFiles',
+ 'info-acceptors',
+ 'imaMonitoringPort',
+ 'skale-manager',
+ 'syncNode',
+ 'pg-threads',
+ 'pg-threads-limit',
]
check_keys(info, keys)
@@ -158,11 +234,27 @@ def check_node_info(node_id, info):
def check_schain_node_info(node_id, schain_node_info, index):
- check_keys(schain_node_info,
- ['nodeID', 'nodeName', 'basePort', 'httpRpcPort',
- 'httpsRpcPort', 'wsRpcPort', 'wssRpcPort', 'publicKey',
- 'blsPublicKey0', 'blsPublicKey1', 'blsPublicKey2',
- 'blsPublicKey3', 'owner', 'schainIndex', 'ip', 'publicIP'])
+ check_keys(
+ schain_node_info,
+ [
+ 'nodeID',
+ 'nodeName',
+ 'basePort',
+ 'httpRpcPort',
+ 'httpsRpcPort',
+ 'wsRpcPort',
+ 'wssRpcPort',
+ 'publicKey',
+ 'blsPublicKey0',
+ 'blsPublicKey1',
+ 'blsPublicKey2',
+ 'blsPublicKey3',
+ 'owner',
+ 'schainIndex',
+ 'ip',
+ 'publicIP',
+ ],
+ )
assert schain_node_info['nodeID'] == node_id
check_node_ports(schain_node_info)
check_node_bls_keys(schain_node_info, index)
@@ -171,34 +263,36 @@ def check_schain_node_info(node_id, schain_node_info, index):
def check_schain_info(node_ids, schain_info):
check_keys(
schain_info,
- ['schainID', 'schainName', 'blockAuthor', 'contractStorageLimit',
- 'dbStorageLimit', 'snapshotIntervalSec', 'emptyBlockIntervalMs',
- 'maxConsensusStorageBytes', 'maxSkaledLeveldbStorageBytes',
- 'maxFileStorageBytes', 'maxReservedStorageBytes',
- 'nodes', 'revertableFSPatchTimestamp', 'contractStoragePatchTimestamp']
+ [
+ 'schainID',
+ 'schainName',
+ 'blockAuthor',
+ 'contractStorageLimit',
+ 'dbStorageLimit',
+ 'snapshotIntervalSec',
+ 'emptyBlockIntervalMs',
+ 'maxConsensusStorageBytes',
+ 'maxSkaledLeveldbStorageBytes',
+ 'maxFileStorageBytes',
+ 'maxReservedStorageBytes',
+ 'nodes',
+ 'revertableFSPatchTimestamp',
+ 'contractStoragePatchTimestamp',
+ ],
)
- for index, (nid, schain_node_info) in enumerate(zip(
- node_ids,
- schain_info['nodes']
- )):
+ for index, (nid, schain_node_info) in enumerate(zip(node_ids, schain_info['nodes'])):
check_schain_node_info(nid, schain_node_info, index)
def check_config(node_id, all_node_ids, config):
- check_keys(
- config,
- ['sealEngine', 'params', 'unddos', 'genesis', 'accounts', 'skaleConfig']
- )
+ check_keys(config, ['sealEngine', 'params', 'unddos', 'genesis', 'accounts', 'skaleConfig'])
assert config['params']['skaleDisableChainIdCheck'] is True
check_node_info(node_id, config['skaleConfig']['nodeInfo'])
check_schain_info(all_node_ids, config['skaleConfig']['sChain'])
def test_generate_schain_config_with_skale(
- skale,
- node_config,
- schain_on_contracts,
- schain_secret_key_file
+ skale, node_config, schain_on_contracts, schain_secret_key_file
):
schain_name = schain_on_contracts
node_ids = skale.schains_internal.get_node_ids_for_schain(schain_name)
@@ -211,27 +305,18 @@ def test_generate_schain_config_with_skale(
rotation_data={'rotation_id': 0, 'leaving_node': 1},
ecdsa_key_name=ECDSA_KEY_NAME,
generation=0,
- node_options=NodeOptions()
+ node_options=NodeOptions(),
)
check_config(current_node_id, node_ids, schain_config.to_dict())
def test_generate_schain_config_gen0(schain_secret_key_file_default_chain, skale_manager_opts):
- schain = {
- 'name': 'test_schain',
- 'partOfNode': 0,
- 'generation': 0,
- 'mainnetOwner': '0x30E1C96277735B03E59B3098204fd04FD0e78a46',
- 'originator': TEST_ORIGINATOR_ADDRESS,
- 'multitransactionMode': True
- }
-
node_id, generation, rotation_id = 1, 0, 0
ecdsa_key_name = 'test'
node_groups = {}
schain_config = generate_schain_config(
- schain=schain,
+ schain=get_schain_struct(schain_name='test_schain'),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -242,7 +327,7 @@ def test_generate_schain_config_gen0(schain_secret_key_file_default_chain, skale
is_owner_contract=False,
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
- schain_base_port=10000
+ schain_base_port=10000,
)
config = schain_config.to_dict()
@@ -256,7 +341,7 @@ def test_generate_schain_config_gen1(schain_secret_key_file_default_chain, skale
node_groups = {}
schain_config = generate_schain_config(
- schain=SCHAIN_WITH_ORIGINATOR,
+ schain=get_schain_struct(schain_name='test_schain'),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -267,7 +352,7 @@ def test_generate_schain_config_gen1(schain_secret_key_file_default_chain, skale
is_owner_contract=True,
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
- schain_base_port=10000
+ schain_base_port=10000,
)
config = schain_config.to_dict()
@@ -294,15 +379,14 @@ def test_generate_schain_config_gen1(schain_secret_key_file_default_chain, skale
def test_generate_schain_config_gen1_pk_owner(
- schain_secret_key_file_default_chain,
- skale_manager_opts
+ schain_secret_key_file_default_chain, skale_manager_opts
):
node_id, generation, rotation_id = 1, 1, 0
ecdsa_key_name = 'test'
node_groups = {}
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -313,7 +397,7 @@ def test_generate_schain_config_gen1_pk_owner(
is_owner_contract=False,
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
- schain_base_port=10000
+ schain_base_port=10000,
)
config = schain_config.to_dict()
@@ -322,15 +406,14 @@ def test_generate_schain_config_gen1_pk_owner(
def test_generate_schain_config_gen2_schain_id(
- schain_secret_key_file_default_chain,
- skale_manager_opts
+ schain_secret_key_file_default_chain, skale_manager_opts
):
node_id, generation, rotation_id = 1, 2, 0
ecdsa_key_name = 'test'
node_groups = {}
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -341,19 +424,18 @@ def test_generate_schain_config_gen2_schain_id(
is_owner_contract=False,
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
- schain_base_port=10000
+ schain_base_port=10000,
)
config = schain_config.to_dict()
assert config['skaleConfig']['sChain']['schainID'] == 2755779573749746
def test_generate_schain_config_gen1_schain_id(
- schain_secret_key_file_default_chain,
- skale_manager_opts
+ schain_secret_key_file_default_chain, skale_manager_opts
):
node_id, generation, rotation_id = 1, 1, 0
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name='test',
@@ -364,19 +446,18 @@ def test_generate_schain_config_gen1_schain_id(
is_owner_contract=False,
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
- schain_base_port=10000
+ schain_base_port=10000,
)
config = schain_config.to_dict()
assert config['skaleConfig']['sChain']['schainID'] == 1
def test_generate_schain_config_gen0_schain_id(
- schain_secret_key_file_default_chain,
- skale_manager_opts
+ schain_secret_key_file_default_chain, skale_manager_opts
):
node_id, generation, rotation_id = 1, 0, 0
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name='test',
@@ -387,17 +468,66 @@ def test_generate_schain_config_gen0_schain_id(
is_owner_contract=False,
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
- schain_base_port=10000
+ schain_base_port=10000,
)
config = schain_config.to_dict()
assert config['skaleConfig']['sChain']['schainID'] == 1
+def test_generate_schain_config_allocation_type(
+ schain_secret_key_file_default_chain, skale_manager_opts
+):
+ node_id, generation, rotation_id = 1, 1, 0
+ ecdsa_key_name = 'test'
+ node_groups = {}
+
+ schain = get_schain_struct(schain_name='test_schain')
+ schain.options.allocation_type = AllocationType.NO_FILESTORAGE
+
+ schain_config = generate_schain_config(
+ schain=schain,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=True,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ )
+ config = schain_config.to_dict()
+ assert config['skaleConfig']['sChain']['maxConsensusStorageBytes'] == 94904996659
+ assert config['skaleConfig']['sChain']['maxSkaledLeveldbStorageBytes'] == 94904996659
+ assert config['skaleConfig']['sChain']['maxFileStorageBytes'] == 0
+
+ schain = get_schain_struct(schain_name='test_schain')
+ schain.options.allocation_type = AllocationType.MAX_CONSENSUS_DB
+
+ schain_config = generate_schain_config(
+ schain=schain,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ node_groups=node_groups,
+ generation=generation,
+ is_owner_contract=True,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ )
+ config = schain_config.to_dict()
+ assert config['skaleConfig']['sChain']['maxConsensusStorageBytes'] == 151847994654
+ assert config['skaleConfig']['sChain']['maxSkaledLeveldbStorageBytes'] == 37961998663
+ assert config['skaleConfig']['sChain']['maxFileStorageBytes'] == 0
+
+
def test_generate_schain_config_with_skale_gen2(
- skale,
- schain_on_contracts,
- schain_secret_key_file,
- node_config
+ skale, schain_on_contracts, schain_secret_key_file, node_config
):
schain_name = schain_on_contracts
node_ids = skale.schains_internal.get_node_ids_for_schain(schain_name)
@@ -409,7 +539,7 @@ def test_generate_schain_config_with_skale_gen2(
node_config=node_config,
rotation_data={'rotation_id': 0, 'leaving_node': 1},
ecdsa_key_name=ECDSA_KEY_NAME,
- generation=2
+ generation=2,
)
schain_config_dict = schain_config.to_dict()
check_config(current_node_id, node_ids, schain_config_dict)
@@ -417,23 +547,20 @@ def test_generate_schain_config_with_skale_gen2(
def test_get_schain_originator(predeployed_ima):
- originator = get_schain_originator(SCHAIN_WITHOUT_ORIGINATOR)
+ originator = get_schain_originator(get_schain_struct_no_originator())
assert originator == TEST_MAINNET_OWNER_ADDRESS
- originator = get_schain_originator(SCHAIN_WITH_ORIGINATOR)
+ originator = get_schain_originator(get_schain_struct())
assert originator == TEST_ORIGINATOR_ADDRESS
-def test_generate_sync_node_config(
- schain_secret_key_file_default_chain,
- skale_manager_opts
-):
+def test_generate_sync_node_config(schain_secret_key_file_default_chain, skale_manager_opts):
node_id, generation, rotation_id = 1, 1, 0
ecdsa_key_name = 'test'
node_groups = {}
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -445,24 +572,23 @@ def test_generate_sync_node_config(
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
schain_base_port=10000,
- sync_node=True
+ sync_node=True,
)
config = schain_config.to_dict()
assert config['skaleConfig']['nodeInfo']['syncNode']
- assert config['skaleConfig']['sChain']['dbStorageLimit'] == 284999761
+ assert config['skaleConfig']['sChain']['dbStorageLimit'] == 12653999554
def test_generate_sync_node_config_archive_catchup(
- schain_secret_key_file_default_chain,
- skale_manager_opts
+ schain_secret_key_file_default_chain, skale_manager_opts
):
node_id, generation, rotation_id = 1, 1, 0
ecdsa_key_name = 'test'
node_groups = {}
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -474,17 +600,16 @@ def test_generate_sync_node_config_archive_catchup(
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
schain_base_port=10000,
- sync_node=True
+ sync_node=True,
)
config = schain_config.to_dict()
assert not config['skaleConfig']['nodeInfo'].get('syncFromCatchup')
assert not config['skaleConfig']['nodeInfo'].get('archiveMode')
- assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < \
- 1000000000000000000
+ assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < 1000000000000000000
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -498,17 +623,16 @@ def test_generate_sync_node_config_archive_catchup(
schain_base_port=10000,
sync_node=True,
archive=False,
- catchup=True
+ catchup=True,
)
config = schain_config.to_dict()
assert config['skaleConfig']['nodeInfo'].get('syncFromCatchup')
assert config['skaleConfig']['nodeInfo'].get('archiveMode') is False
- assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < \
- 1000000000000000000
+ assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < 1000000000000000000
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -522,17 +646,16 @@ def test_generate_sync_node_config_archive_catchup(
schain_base_port=10000,
sync_node=False,
archive=False,
- catchup=True
+ catchup=True,
)
config = schain_config.to_dict()
assert config['skaleConfig']['nodeInfo'].get('syncFromCatchup') is None
assert config['skaleConfig']['nodeInfo'].get('archiveMode') is None
- assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < \
- 1000000000000000000
+ assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') < 1000000000000000000
schain_config = generate_schain_config(
- schain=SCHAIN_WITHOUT_ORIGINATOR,
+ schain=get_schain_struct_no_originator(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -546,26 +669,24 @@ def test_generate_sync_node_config_archive_catchup(
schain_base_port=10000,
sync_node=True,
archive=True,
- catchup=True
+ catchup=True,
)
config = schain_config.to_dict()
assert config['skaleConfig']['nodeInfo'].get('syncFromCatchup')
assert config['skaleConfig']['nodeInfo'].get('archiveMode')
- assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') == \
- 1000000000000000000
+ assert config['skaleConfig']['sChain'].get('maxConsensusStorageBytes') == 1000000000000000000
def test_generate_sync_node_config_static_accounts(
- schain_secret_key_file_default_chain,
- skale_manager_opts
+ schain_secret_key_file_default_chain, skale_manager_opts
):
node_id, generation, rotation_id = 1, 1, 0
ecdsa_key_name = 'test'
node_groups = {}
schain_config = generate_schain_config(
- schain=SCHAIN_WITH_STATIC_ACCOUNTS,
+ schain=get_schain_struct_static_account(),
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
@@ -577,26 +698,67 @@ def test_generate_sync_node_config_static_accounts(
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
schain_base_port=10000,
- sync_node=True
+ sync_node=True,
)
config = schain_config.to_dict()
assert config['accounts'].get('0x1111111')
assert config['accounts']['0x1111111']['balance'] == '1000000000000000000000000000000'
+ schain = get_schain_struct()
+
schain_config = generate_schain_config(
- schain=SCHAIN_WITH_ORIGINATOR,
+ schain=schain,
node=TEST_NODE,
node_id=node_id,
ecdsa_key_name=ecdsa_key_name,
rotation_id=rotation_id,
- schain_nodes_with_schains=get_schain_node_with_schains('test_schain'),
+ schain_nodes_with_schains=get_schain_node_with_schains(schain.name),
node_groups=node_groups,
generation=generation,
is_owner_contract=False,
skale_manager_opts=skale_manager_opts,
common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
schain_base_port=10000,
- sync_node=True
+ sync_node=True,
)
config = schain_config.to_dict()
assert not config['accounts'].get('0x1111111')
+
+
+def test_generate_config_static_groups(
+ _schain_name,
+ schain_secret_key_file_default_chain,
+ static_groups_for_schain,
+ skale_manager_opts
+):
+ node_id, generation, rotation_id = 1, 1, 0
+ ecdsa_key_name = 'test'
+
+ schain = get_schain_struct(schain_name=_schain_name)
+ schain.mainnet_owner = TEST_MAINNET_OWNER_ADDRESS
+ schain.originator = TEST_ORIGINATOR_ADDRESS
+ schain.options.multitransaction_mode = True
+
+ schain_config = generate_schain_config(
+ schain=schain,
+ node=TEST_NODE,
+ node_id=node_id,
+ ecdsa_key_name=ecdsa_key_name,
+ rotation_id=rotation_id,
+ schain_nodes_with_schains=get_schain_node_with_schains(_schain_name),
+ node_groups=NODE_GROUPS,
+ generation=generation,
+ is_owner_contract=False,
+ skale_manager_opts=skale_manager_opts,
+ common_bls_public_keys=COMMON_BLS_PUBLIC_KEY,
+ schain_base_port=10000,
+ sync_node=True
+ )
+ config = schain_config.to_dict()
+
+ config_group = config['skaleConfig']['sChain']['nodeGroups']
+ assert len(config_group.keys()) == 3
+ for rotation_id_string in static_groups_for_schain:
+ rotation_id = int(rotation_id_string)
+ assert json.dumps(config_group[rotation_id]) == \
+ json.dumps(static_groups_for_schain[rotation_id_string])
diff --git a/tests/schains/config/legacy_data_test.py b/tests/schains/config/legacy_data_test.py
new file mode 100644
index 000000000..8708dc3fb
--- /dev/null
+++ b/tests/schains/config/legacy_data_test.py
@@ -0,0 +1,25 @@
+import json
+
+from core.schains.config.legacy_data import is_static_accounts, static_accounts, static_groups
+from tests.utils import STATIC_NODE_GROUPS
+
+
+SCHAIN_NAME = 'test'
+
+
+def test_is_static_accounts():
+ assert is_static_accounts(SCHAIN_NAME)
+ assert not is_static_accounts('qwerty')
+
+
+def test_static_accounts():
+ accounts = static_accounts(SCHAIN_NAME)
+ assert isinstance(accounts, dict)
+ assert accounts.get('accounts', None)
+
+
+def test_static_groups(_schain_name, static_groups_for_schain):
+ groups = static_groups(_schain_name)
+ for key, value in STATIC_NODE_GROUPS.items():
+ assert json.dumps(groups[int(key)]) == json.dumps(value)
+ assert static_groups('not-exists') == {}
diff --git a/tests/schains/config/node_info_test.py b/tests/schains/config/node_info_test.py
index 9b3278d41..d25c905cf 100644
--- a/tests/schains/config/node_info_test.py
+++ b/tests/schains/config/node_info_test.py
@@ -4,6 +4,7 @@
from core.schains.config.node_info import generate_wallets_config, generate_current_node_info
from core.schains.types import SchainType
from tools.configs import SGX_SSL_KEY_FILEPATH, SGX_SSL_CERT_FILEPATH
+from tests.utils import get_schain_struct
COMMON_PUBLIC_KEY = [1, 2, 3, 4]
@@ -77,7 +78,7 @@ def test_generate_current_node_info(
node_id=1,
ecdsa_key_name='123',
static_node_info=static_node_info,
- schain={'name': _schain_name, 'partOfNode': 0},
+ schain=get_schain_struct(schain_name=_schain_name),
rotation_id=0,
skale_manager_opts=skale_manager_opts,
nodes_in_schain=4,
@@ -103,7 +104,7 @@ def test_generate_current_node_info(
node_id=1,
ecdsa_key_name='123',
static_node_info=static_node_info,
- schain={'name': _schain_name, 'partOfNode': 0},
+ schain=get_schain_struct(schain_name=_schain_name),
rotation_id=0,
skale_manager_opts=skale_manager_opts,
nodes_in_schain=4,
@@ -131,7 +132,7 @@ def test_skale_manager_opts(
node_id=1,
ecdsa_key_name='123',
static_node_info=static_node_info,
- schain={'name': _schain_name, 'partOfNode': 0},
+ schain=get_schain_struct(schain_name=_schain_name),
rotation_id=0,
skale_manager_opts=skale_manager_opts,
nodes_in_schain=4,
diff --git a/tests/schains/config/predeployed_test.py b/tests/schains/config/predeployed_test.py
index 06bf53c51..5c5c640b4 100644
--- a/tests/schains/config/predeployed_test.py
+++ b/tests/schains/config/predeployed_test.py
@@ -1,3 +1,5 @@
+from skale.dataclasses.schain_options import AllocationType
+
from marionette_predeployed import MARIONETTE_ADDRESS
from etherbase_predeployed import ETHERBASE_ADDRESS
from context_predeployed import CONTEXT_ADDRESS
@@ -17,6 +19,7 @@ def test_generate_predeployed_accounts():
predeployed_section = generate_predeployed_accounts(
schain_name='abc',
schain_type=SchainType.medium,
+ allocation_type=AllocationType.DEFAULT,
schain_nodes={},
on_chain_owner='0xD1000000000000000000000000000000000000D1',
mainnet_owner='0xD4000000000000000000000000000000000000D4',
@@ -28,6 +31,7 @@ def test_generate_predeployed_accounts():
predeployed_section = generate_predeployed_accounts(
schain_name='abc',
schain_type=SchainType.medium,
+ allocation_type=AllocationType.DEFAULT,
schain_nodes={},
on_chain_owner='0xD1000000000000000000000000000000000000D1',
mainnet_owner='0xD4000000000000000000000000000000000000D4',
@@ -40,6 +44,7 @@ def test_generate_predeployed_accounts():
def test_generate_v1_predeployed_contracts():
v1_precompiled_contracts = generate_v1_predeployed_contracts(
schain_type=SchainType.medium,
+ allocation_type=AllocationType.DEFAULT,
on_chain_owner=MARIONETTE_ADDRESS,
mainnet_owner='0x0123456789Ab',
message_proxy_for_schain_address='0x987654321fC',
diff --git a/tests/schains/config/static_accounts_test.py b/tests/schains/config/static_accounts_test.py
deleted file mode 100644
index bb4659281..000000000
--- a/tests/schains/config/static_accounts_test.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from core.schains.config.static_accounts import is_static_accounts, static_accounts
-
-SCHAIN_NAME = 'test'
-
-
-def test_is_static_accounts():
- assert is_static_accounts(SCHAIN_NAME)
- assert not is_static_accounts('qwerty')
-
-
-def test_static_accounts():
- accounts = static_accounts(SCHAIN_NAME)
- assert isinstance(accounts, dict)
- assert accounts.get('accounts', None)
diff --git a/tests/schains/config/static_params_test.py b/tests/schains/config/static_params_test.py
index 0fb7e4709..163e52c32 100644
--- a/tests/schains/config/static_params_test.py
+++ b/tests/schains/config/static_params_test.py
@@ -5,14 +5,14 @@
get_schain_static_param,
get_static_schain_cmd,
get_static_schain_info,
- get_static_node_info
+ get_static_node_info,
)
from tools.configs import ENV_TYPE
TEST_SCHAIN_NAME = 'test-schain'
-LEGACY_TS_NAME = 'revertableFSPatchTimestamp'
-NEW_TS_NAME = 'contractStorageZeroValuePatchTimestamp'
+DEFAULT_TS_NAME = 'revertableFSPatchTimestamp'
+CHAIN_SPECIFIC_TS_NAME = 'flexibleDeploymentPatchTimestamp'
def test_get_static_schain_cmd():
@@ -23,13 +23,24 @@ def test_get_static_schain_cmd():
def test_get_static_schain_info():
schain_info = get_static_schain_info(TEST_SCHAIN_NAME)
assert schain_info == {
- "contractStorageZeroValuePatchTimestamp": 1500000,
- "revertableFSPatchTimestamp": 1000000,
- "contractStoragePatchTimestamp": 1000000,
- "snapshotIntervalSec": 0,
- "emptyBlockIntervalMs": 10000,
- "snapshotDownloadTimeout": 18000,
- "snapshotDownloadInactiveTimeout": 120
+ 'contractStorageZeroValuePatchTimestamp': 1000000,
+ 'revertableFSPatchTimestamp': 1000000,
+ 'contractStoragePatchTimestamp': 1000000,
+ 'verifyDaSigsPatchTimestamp': 1000000,
+ 'storageDestructionPatchTimestamp': 1000000,
+ 'powCheckPatchTimestamp': 1000000,
+ 'skipInvalidTransactionsPatchTimestamp': 1000000,
+ 'pushZeroPatchTimestamp': 1712142000,
+ 'precompiledConfigPatchTimestamp': 1712314800,
+ 'correctForkInPowPatchTimestamp': 1711969200,
+ 'EIP1559TransactionsPatchTimestamp': 0,
+ 'fastConsensusPatchTimestamp': 0,
+ 'flexibleDeploymentPatchTimestamp': 1723460400,
+ 'verifyBlsSyncPatchTimestamp': 0,
+ 'snapshotIntervalSec': 3600,
+ 'emptyBlockIntervalMs': 10000,
+ 'snapshotDownloadTimeout': 18000,
+ 'snapshotDownloadInactiveTimeout': 120,
}
@@ -37,35 +48,36 @@ def test_get_static_schain_info_custom_chain_ts():
custom_schain_info = get_static_schain_info(TEST_SCHAIN_NAME)
default_schain_info = get_static_schain_info('test')
- assert custom_schain_info[LEGACY_TS_NAME] == default_schain_info[LEGACY_TS_NAME]
- assert custom_schain_info[NEW_TS_NAME] != default_schain_info[NEW_TS_NAME]
+ assert custom_schain_info[DEFAULT_TS_NAME] == default_schain_info[DEFAULT_TS_NAME]
+ assert custom_schain_info[CHAIN_SPECIFIC_TS_NAME] != default_schain_info[CHAIN_SPECIFIC_TS_NAME]
- assert custom_schain_info[NEW_TS_NAME] == 1500000
- assert default_schain_info[NEW_TS_NAME] == 800000
+ assert custom_schain_info[CHAIN_SPECIFIC_TS_NAME] == 1723460400
+ assert default_schain_info[CHAIN_SPECIFIC_TS_NAME] == 0
def test_get_schain_static_param():
static_params = get_static_params(ENV_TYPE)
legacy_ts_info = get_schain_static_param(
- static_params['schain'][LEGACY_TS_NAME],
- TEST_SCHAIN_NAME
+ static_params['schain'][DEFAULT_TS_NAME], TEST_SCHAIN_NAME
)
- assert legacy_ts_info == static_params['schain'].get(LEGACY_TS_NAME)
+ assert legacy_ts_info == static_params['schain'].get(DEFAULT_TS_NAME)
+ print(static_params['schain'])
new_ts_info_custom_chain = get_schain_static_param(
- static_params['schain'][NEW_TS_NAME],
- TEST_SCHAIN_NAME
+ static_params['schain'][CHAIN_SPECIFIC_TS_NAME], TEST_SCHAIN_NAME
)
- assert new_ts_info_custom_chain != static_params['schain'][NEW_TS_NAME]
- assert new_ts_info_custom_chain == static_params['schain'][NEW_TS_NAME][TEST_SCHAIN_NAME]
+
+ assert new_ts_info_custom_chain != static_params['schain'][CHAIN_SPECIFIC_TS_NAME]
+ assert new_ts_info_custom_chain == \
+ static_params['schain'][CHAIN_SPECIFIC_TS_NAME][TEST_SCHAIN_NAME]
new_ts_info_default_chain = get_schain_static_param(
- static_params['schain'][NEW_TS_NAME],
- 'test'
+ static_params['schain'][CHAIN_SPECIFIC_TS_NAME], 'test'
)
- assert new_ts_info_default_chain != static_params['schain'][NEW_TS_NAME]
- assert new_ts_info_default_chain != static_params['schain'][NEW_TS_NAME].get('test')
- assert new_ts_info_default_chain == static_params['schain'][NEW_TS_NAME].get('default')
+ assert new_ts_info_default_chain != static_params['schain'][CHAIN_SPECIFIC_TS_NAME]
+ assert new_ts_info_default_chain != static_params['schain'][CHAIN_SPECIFIC_TS_NAME].get('test')
+ assert new_ts_info_default_chain == \
+ static_params['schain'][CHAIN_SPECIFIC_TS_NAME].get('default')
def test_get_static_node_info():
diff --git a/tests/schains/monitor/action/skaled_action_test.py b/tests/schains/monitor/action/skaled_action_test.py
index 3281ec6bd..7f66943cf 100644
--- a/tests/schains/monitor/action/skaled_action_test.py
+++ b/tests/schains/monitor/action/skaled_action_test.py
@@ -31,7 +31,7 @@ def run_ima_container_mock(
dutils=None
):
image_name, container_name, _, _ = get_container_info(
- IMA_CONTAINER, schain['name'])
+ IMA_CONTAINER, schain.name)
image = image or image_name
dutils.safe_rm(container_name)
dutils.run_container(
@@ -46,6 +46,7 @@ def monitor_schain_container_mock(
schain_record,
skaled_status,
download_snapshot=False,
+ snapshot_from='',
start_ts=None,
abort_on_exit=True,
dutils=None,
@@ -53,7 +54,7 @@ def monitor_schain_container_mock(
historic_state=False
):
image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain['name'])
+ SCHAIN_CONTAINER, schain.name)
dutils.safe_rm(container_name)
if not skaled_status.exit_time_reached or not abort_on_exit:
dutils.run_container(
@@ -92,6 +93,7 @@ def skaled_am(
secret_key,
ssl_folder,
ima_migration_schedule,
+ ncli_status,
dutils,
skaled_checks
):
@@ -102,6 +104,7 @@ def skaled_am(
rule_controller=rule_controller,
checks=skaled_checks,
node_config=node_config,
+ ncli_status=ncli_status,
dutils=dutils
)
@@ -145,6 +148,7 @@ def test_skaled_container_with_snapshot_action(skaled_am):
schain_record=skaled_am.schain_record,
skaled_status=skaled_am.skaled_status,
download_snapshot=True,
+ snapshot_from='127.0.0.1',
start_ts=None,
abort_on_exit=True,
dutils=skaled_am.dutils,
@@ -174,6 +178,7 @@ def test_skaled_container_snapshot_delay_start_action(skaled_am):
start_ts=ts,
abort_on_exit=True,
dutils=skaled_am.dutils,
+ snapshot_from='127.0.0.1',
sync_node=False,
historic_state=False
)
@@ -282,7 +287,7 @@ def test_ima_container_action_from_scratch(
container_name = containers[0].name
assert container_name == f'skale_ima_{skaled_am.name}'
image = dutils.get_container_image_name(container_name)
- assert image == 'skalenetwork/ima:2.0.0-beta.9'
+ assert image == 'skalenetwork/ima:2.1.0'
# @pytest.mark.skip('Docker API GA issues need to be resolved')
@@ -304,8 +309,8 @@ def test_ima_container_action_image_pulling(
container_name = containers[0].name
assert container_name == f'skale_ima_{skaled_am.name}'
image = dutils.get_container_image_name(container_name)
- assert image == 'skalenetwork/ima:2.0.0-develop.3'
- assert dutils.pulled('skalenetwork/ima:2.0.0-beta.9')
+ assert image == 'skalenetwork/ima:2.1.0-beta.3'
+ assert dutils.pulled('skalenetwork/ima:2.1.0')
def test_ima_container_action_image_migration(
@@ -325,7 +330,7 @@ def test_ima_container_action_image_migration(
container_name = containers[0].name
assert container_name == f'skale_ima_{skaled_am.name}'
image = dutils.get_container_image_name(container_name)
- assert image == 'skalenetwork/ima:2.0.0-beta.9'
+ assert image == 'skalenetwork/ima:2.1.0'
def test_ima_container_action_time_frame_migration(
@@ -482,3 +487,12 @@ def test_firewall_rules_action(skaled_am, skaled_checks, rule_controller, econfi
SChainRule(port=10009),
SChainRule(port=10010, first_ip='127.0.0.2', last_ip='127.0.0.2')
]
+
+
+def test_disable_repair_mode(skaled_am):
+ skaled_am.schain_record.set_repair_mode(True)
+ assert skaled_am.schain_record.repair_mode
+ skaled_am.disable_repair_mode()
+ assert not skaled_am.schain_record.repair_mode
+ skaled_am.disable_repair_mode()
+ assert not skaled_am.schain_record.repair_mode
diff --git a/tests/schains/monitor/containers_test.py b/tests/schains/monitor/containers_test.py
index eb0922e14..6068a7785 100644
--- a/tests/schains/monitor/containers_test.py
+++ b/tests/schains/monitor/containers_test.py
@@ -1,12 +1,11 @@
import time
-
from unittest import mock
from core.schains.monitor.containers import monitor_schain_container
from core.schains.runner import is_container_exists
from web.models.schain import upsert_schain_record
-from tests.utils import run_custom_schain_container
+from tests.utils import get_schain_struct, run_custom_schain_container
def test_monitor_schain_container(
@@ -18,7 +17,7 @@ def test_monitor_schain_container(
cleanup_schain_containers
):
schain_record = upsert_schain_record(schain_db)
- schain = {'name': schain_db, 'partOfNode': 0, 'generation': 0}
+ schain = get_schain_struct(schain_name=schain_db)
monitor_schain_container(schain, schain_record, skaled_status, dutils=dutils)
assert not is_container_exists(schain_db, dutils=dutils)
@@ -37,7 +36,7 @@ def test_monitor_schain_container_exit_time_reached(
cleanup_schain_containers
):
schain_record = upsert_schain_record(schain_db)
- schain = {'name': schain_db, 'partOfNode': 0, 'generation': 0}
+ schain = get_schain_struct(schain_name=schain_db)
with mock.patch('core.schains.monitor.containers.is_volume_exists', return_value=True):
schain_record.set_failed_rpc_count(100)
@@ -73,10 +72,9 @@ def test_monitor_schain_container_ec(
cleanup_schain_containers
):
schain_record = upsert_schain_record(schain_db)
- schain = {'name': schain_db, 'partOfNode': 0, 'generation': 0}
- schain_name = schain_db
+ schain = get_schain_struct(schain_name=schain_db)
- run_custom_schain_container(dutils, schain_name, entrypoint=['sh', 'exit', '1'])
+ run_custom_schain_container(dutils, schain.name, entrypoint=['sh', 'exit', '1'])
# To make sure container initializaed
time.sleep(2)
with mock.patch('core.schains.monitor.containers.is_volume_exists', return_value=True):
diff --git a/tests/schains/monitor/main_test.py b/tests/schains/monitor/main_test.py
index 3c094ab4b..242fbe43e 100644
--- a/tests/schains/monitor/main_test.py
+++ b/tests/schains/monitor/main_test.py
@@ -1,17 +1,35 @@
-import mock
-from concurrent.futures import ThreadPoolExecutor
+import functools
+import logging
+import os
+import pathlib
+import shutil
+import time
+from concurrent.futures import Future
+from typing import Callable
+from unittest import mock
import pytest
from core.schains.firewall.types import IpRange
from core.schains.firewall.utils import get_sync_agent_ranges
-from core.schains.monitor.main import run_monitor_for_schain
-from core.schains.task import Task
-
+from core.schains.process import ProcessReport
+from core.schains.monitor.main import ConfigTask, SkaledTask
+from core.schains.monitor.tasks import execute_tasks, ITask
+from tools.configs.schains import SCHAINS_DIR_PATH
from tools.helper import is_node_part_of_chain
from web.models.schain import upsert_schain_record
+@pytest.fixture
+def tmp_dir(_schain_name):
+ path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
+ pathlib.Path(path).mkdir()
+ try:
+ yield path
+ finally:
+ shutil.rmtree(path, ignore_errors=True)
+
+
@pytest.fixture
def sync_ranges(skale):
skale.sync_manager.grant_sync_manager_role(skale.wallet.address)
@@ -28,7 +46,7 @@ def test_get_sync_agent_ranges(skale, sync_ranges):
ranges = get_sync_agent_ranges(skale)
assert ranges == [
IpRange(start_ip='127.0.0.1', end_ip='127.0.0.2'),
- IpRange(start_ip='127.0.0.5', end_ip='127.0.0.7')
+ IpRange(start_ip='127.0.0.5', end_ip='127.0.0.7'),
]
@@ -49,45 +67,122 @@ def test_is_node_part_of_chain(skale, schain_on_contracts, node_config):
assert not chain_on_node
-def test_run_monitor_for_schain(
- skale,
- skale_ima,
- schain_on_contracts,
- node_config,
- schain_db,
- dutils
-):
- with mock.patch('core.schains.monitor.main.keep_tasks_running') as keep_tasks_running_mock:
- run_monitor_for_schain(
- skale,
- skale_ima,
- node_config,
- schain={'name': schain_db, 'partOfNode': 0, 'generation': 0},
- dutils=dutils,
- once=True
- )
- assert isinstance(keep_tasks_running_mock.call_args[0][0], ThreadPoolExecutor)
- assert isinstance(keep_tasks_running_mock.call_args[0][1][0], Task)
- assert isinstance(keep_tasks_running_mock.call_args[0][1][1], Task)
- assert keep_tasks_running_mock.call_args[0][2] == [None, None]
-
-
-def test_run_monitor_for_schain_left(
- skale,
- skale_ima,
- node_config,
- schain_db,
- dutils
-):
- schain_not_exists = 'not-on-node'
- upsert_schain_record(schain_not_exists)
- with mock.patch('core.schains.monitor.main.keep_tasks_running') as keep_tasks_running_mock:
- run_monitor_for_schain(
- skale,
- skale_ima,
- node_config,
- schain={'name': schain_not_exists, 'partOfNode': 0, 'generation': 0},
- dutils=dutils,
- once=True
- )
- keep_tasks_running_mock.assert_not_called()
+def test_config_task(skale, skale_ima, schain_db, schain_on_contracts, node_config):
+ stream_version = '2.3.0'
+ config_task = ConfigTask(
+ schain_name=schain_on_contracts,
+ skale=skale,
+ skale_ima=skale_ima,
+ node_config=node_config,
+ stream_version=stream_version,
+ )
+ assert config_task.needed
+ skale_ima.linker.has_schain = mock.Mock(return_value=True)
+
+ def get_monitor_mock(*args, **kwargs):
+ result = mock.MagicMock()
+ result.__name__ = 'TestConfigMonitor'
+ return result
+
+ with mock.patch('core.schains.monitor.main.RegularConfigMonitor', get_monitor_mock):
+ pipeline = config_task.create_pipeline()
+ pipeline()
+
+
+def test_skaled_task(skale, schain_db, schain_on_contracts, node_config, dutils):
+ record = upsert_schain_record(schain_on_contracts)
+ stream_version = '2.3.0'
+ skaled_task = SkaledTask(
+ schain_name=schain_on_contracts,
+ skale=skale,
+ node_config=node_config,
+ stream_version=stream_version,
+ dutils=dutils,
+ )
+ assert not skaled_task.needed
+ assert skaled_task.name == 'skaled'
+ assert skaled_task.start_ts == 0
+ assert skaled_task.stuck_timeout == 3600
+
+ record.set_config_version(stream_version)
+ assert skaled_task.needed
+
+ def get_monitor_mock(*args, **kwargs):
+ result = mock.MagicMock()
+ result.__name__ = 'TestSkaledMonitor'
+ return result
+
+ with mock.patch('core.schains.monitor.main.get_skaled_monitor', get_monitor_mock):
+ with mock.patch('core.schains.monitor.main.notify_checks'):
+ pipeline = skaled_task.create_pipeline()
+ pipeline()
+
+
+def test_execute_tasks(tmp_dir, _schain_name):
+ def run_stuck_pipeline(index: int) -> None:
+ logging.info('Running stuck pipeline %d', index)
+ iterations = 7
+ for i in range(iterations):
+ logging.info('Stuck pipeline %d beat', index)
+ time.sleep(1)
+
+ class StuckedTask(ITask):
+ def __init__(self, index) -> None:
+ self._name = 'stucked-task'
+ self.index = index
+ self._stuck_timeout = 3
+ self._start_ts = 0
+ self._future = Future()
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+ @property
+ def future(self) -> Future:
+ return self._future
+
+ @future.setter
+ def future(self, value: Future) -> None:
+ self._future = value
+
+ @property
+ def start_ts(self) -> int:
+ return self._start_ts
+
+ @start_ts.setter
+ def start_ts(self, value: int) -> None:
+ print(f'Updating start_ts {self} {value}')
+ self._start_ts = value
+
+ @property
+ def task_name(self) -> str:
+ return self._task_name
+
+ @property
+ def stuck_timeout(self) -> int:
+ return self._stuck_timeout
+
+ @property
+ def needed(self) -> bool:
+ return True
+
+ def create_pipeline(self) -> Callable:
+ return functools.partial(run_stuck_pipeline, index=self.index)
+
+ class NotNeededTask(StuckedTask):
+ def __init__(self, index: int) -> None:
+ super().__init__(index=index)
+ self._name = 'not-needed-task'
+
+ @property
+ def needed(self) -> bool:
+ return False
+
+ process_report = ProcessReport(name=_schain_name)
+ tasks = [StuckedTask(0), NotNeededTask(1)]
+ execute_tasks(tasks=tasks, process_report=process_report, sleep_interval=1)
+
+ print(tasks[0], tasks[1])
+ assert tasks[0].start_ts == -1
+ assert tasks[1].start_ts == 0
diff --git a/tests/schains/monitor/process_test.py b/tests/schains/monitor/process_test.py
new file mode 100644
index 000000000..d5b495d03
--- /dev/null
+++ b/tests/schains/monitor/process_test.py
@@ -0,0 +1,32 @@
+import os
+import shutil
+import time
+from pathlib import Path
+
+import pytest
+
+from core.schains.process import ProcessReport
+
+from tools.configs.schains import SCHAINS_DIR_PATH
+
+
+@pytest.fixture
+def tmp_dir(_schain_name):
+ path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
+ Path(path).mkdir()
+ try:
+ yield path
+ finally:
+ shutil.rmtree(path, ignore_errors=True)
+
+
+def test_process_report(_schain_name, tmp_dir):
+ report = ProcessReport(_schain_name)
+ with pytest.raises(FileNotFoundError):
+ assert report.ts == 0
+
+ ts = int(time.time())
+ pid = 10
+ report.update(pid=pid, ts=ts)
+ assert report.ts == ts
+ assert report.pid == pid
diff --git a/tests/schains/monitor/rpc_test.py b/tests/schains/monitor/rpc_test.py
index 65c26ea0b..8d93158bf 100644
--- a/tests/schains/monitor/rpc_test.py
+++ b/tests/schains/monitor/rpc_test.py
@@ -1,6 +1,6 @@
import datetime
-import time
import json
+import time
from unittest import mock
import freezegun
@@ -11,6 +11,7 @@
from core.schains.rpc import check_endpoint_blocks
from tools.configs.containers import SCHAIN_CONTAINER
from web.models.schain import SChainRecord
+from tests.utils import get_schain_struct
CURRENT_TIMESTAMP = 1594903080
CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP)
@@ -21,7 +22,7 @@ def test_handle_failed_schain_rpc_no_container(schain_db, dutils, skaled_status)
image_name, container_name, _, _ = get_container_info(SCHAIN_CONTAINER, schain_db)
assert not handle_failed_schain_rpc(
- schain={'name': schain_db},
+ schain=get_schain_struct(schain_name=schain_db),
schain_record=schain_record,
skaled_status=skaled_status,
dutils=dutils,
@@ -44,7 +45,7 @@ def test_handle_failed_schain_rpc_exit_time_reached(
finished_at = container_info['stats']['State']['FinishedAt']
assert not handle_failed_schain_rpc(
- schain={'name': schain_db},
+ schain=get_schain_struct(schain_name=schain_db),
schain_record=schain_record,
skaled_status=skaled_status_exit_time_reached,
dutils=dutils,
@@ -72,7 +73,7 @@ def test_monitor_schain_downloading_snapshot(
finished_at = container_info['stats']['State']['FinishedAt']
handle_failed_schain_rpc(
- schain={'name': schain_db},
+ schain=get_schain_struct(schain_name=schain_db),
schain_record=schain_record,
skaled_status=skaled_status_downloading_snapshot,
dutils=dutils,
@@ -97,7 +98,7 @@ def test_handle_failed_schain_rpc_stuck_max_retries(
finished_at = container_info['stats']['State']['FinishedAt']
handle_failed_schain_rpc(
- schain={'name': schain_db},
+ schain=get_schain_struct(schain_name=schain_db),
schain_record=schain_record,
skaled_status=skaled_status,
dutils=dutils,
@@ -123,7 +124,7 @@ def test_monitor_container_exited(schain_db, dutils, cleanup_schain_containers,
assert schain_record.restart_count == 0
handle_failed_schain_rpc(
- schain={'name': schain_db},
+ schain=get_schain_struct(schain_name=schain_db),
schain_record=schain_record,
skaled_status=skaled_status,
dutils=dutils,
@@ -152,7 +153,7 @@ def test_handle_failed_schain_rpc_stuck(
assert schain_record.restart_count == 0
handle_failed_schain_rpc(
- schain={'name': schain_db},
+ schain=get_schain_struct(schain_name=schain_db),
schain_record=schain_record,
skaled_status=skaled_status,
dutils=dutils,
diff --git a/tests/schains/monitor/skaled_monitor_test.py b/tests/schains/monitor/skaled_monitor_test.py
index c63cda779..d9e296396 100644
--- a/tests/schains/monitor/skaled_monitor_test.py
+++ b/tests/schains/monitor/skaled_monitor_test.py
@@ -19,7 +19,7 @@
RecreateSkaledMonitor,
RegularSkaledMonitor,
RepairSkaledMonitor,
- UpdateConfigSkaledMonitor
+ UpdateConfigSkaledMonitor,
)
from core.schains.external_config import ExternalConfig
from core.schains.exit_scheduler import ExitScheduleFileManager
@@ -27,6 +27,8 @@
from tools.configs.containers import SCHAIN_CONTAINER, IMA_CONTAINER
from web.models.schain import SChainRecord
+from tests.utils import CURRENT_TS
+
CURRENT_TIMESTAMP = 1594903080
CURRENT_DATETIME = datetime.datetime.utcfromtimestamp(CURRENT_TIMESTAMP)
@@ -34,12 +36,12 @@
def run_ima_container_mock(schain: dict, mainnet_chain_id: int, dutils=None):
image_name, container_name, _, _ = get_container_info(
- IMA_CONTAINER, schain['name'])
+ IMA_CONTAINER, schain.name)
dutils.safe_rm(container_name)
dutils.run_container(
image_name=image_name,
name=container_name,
- entrypoint='bash -c "while true; do foo; sleep 2; done"'
+ entrypoint='bash -c "while true; do foo; sleep 2; done"',
)
@@ -51,15 +53,15 @@ def monitor_schain_container_mock(
start_ts=None,
dutils=None,
sync_node=False,
- historic_state=False
+ historic_state=False,
):
image_name, container_name, _, _ = get_container_info(
- SCHAIN_CONTAINER, schain['name'])
+ SCHAIN_CONTAINER, schain.name)
dutils.safe_rm(container_name)
dutils.run_container(
image_name=image_name,
name=container_name,
- entrypoint='bash -c "while true; do foo; sleep 2; done"'
+ entrypoint='bash -c "while true; do foo; sleep 2; done"',
)
@@ -69,12 +71,7 @@ def rotation_data(schain_db, skale):
@pytest.fixture
-def skaled_checks(
- schain_db,
- skale,
- rule_controller,
- dutils
-):
+def skaled_checks(schain_db, skale, rule_controller, dutils):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
return SkaledChecks(
@@ -82,7 +79,7 @@ def skaled_checks(
schain_record=schain_record,
rule_controller=rule_controller,
dutils=dutils,
- sync_node=False
+ sync_node=False,
)
@@ -98,8 +95,9 @@ def skaled_am(
secret_key,
ssl_folder,
ima_migration_schedule,
+ ncli_status,
dutils,
- skaled_checks
+ skaled_checks,
):
name = schain_db
schain = skale.schains.get_by_name(name)
@@ -107,8 +105,9 @@ def skaled_am(
schain=schain,
rule_controller=rule_controller,
node_config=node_config,
+ ncli_status=ncli_status,
checks=skaled_checks,
- dutils=dutils
+ dutils=dutils,
)
@@ -119,19 +118,14 @@ def config(self) -> CheckRes:
@pytest.fixture
-def skaled_checks_no_config(
- schain_db,
- skale,
- rule_controller,
- dutils
-):
+def skaled_checks_no_config(schain_db, skale, rule_controller, dutils):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
return SkaledChecksNoConfig(
schain_name=name,
schain_record=schain_record,
rule_controller=rule_controller,
- dutils=dutils
+ dutils=dutils,
)
@@ -146,101 +140,76 @@ def rotation_id_updated(self) -> CheckRes:
@pytest.fixture
-def skaled_checks_outdated_config(
- schain_db,
- skale,
- rule_controller,
- dutils
-):
+def skaled_checks_outdated_config(schain_db, skale, rule_controller, dutils):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
return SkaledChecksConfigOutdated(
schain_name=name,
schain_record=schain_record,
rule_controller=rule_controller,
- dutils=dutils
+ dutils=dutils,
)
-def test_get_skaled_monitor_no_config(skaled_am, skaled_checks_no_config, skaled_status, schain_db):
+def test_get_skaled_monitor_no_config(
+ skaled_am, skaled_checks_no_config, skaled_status, schain_db, ncli_status
+):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
mon = get_skaled_monitor(
- skaled_am,
- skaled_checks_no_config.get_all(),
- schain_record,
- skaled_status
+ skaled_am, skaled_checks_no_config.get_all(), schain_record, skaled_status, ncli_status
)
assert mon == NoConfigSkaledMonitor
-def test_get_skaled_monitor_regular_and_backup(skaled_am, skaled_checks, skaled_status, schain_db):
+def test_get_skaled_monitor_regular_and_backup(
+ skaled_am, skaled_checks, skaled_status, schain_db, ncli_status
+):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
mon = get_skaled_monitor(
- skaled_am,
- skaled_checks.get_all(),
- schain_record,
- skaled_status
+ skaled_am, skaled_checks.get_all(), schain_record, skaled_status, ncli_status
)
assert mon == RegularSkaledMonitor
schain_record.set_backup_run(True)
mon = get_skaled_monitor(
- skaled_am,
- skaled_checks.get_all(),
- schain_record,
- skaled_status
+ skaled_am, skaled_checks.get_all(), schain_record, skaled_status, ncli_status
)
assert mon == RegularSkaledMonitor
schain_record.set_first_run(False)
mon = get_skaled_monitor(
- skaled_am,
- skaled_checks.get_all(),
- schain_record,
- skaled_status
+ skaled_am, skaled_checks.get_all(), schain_record, skaled_status, ncli_status
)
assert mon == RegularSkaledMonitor
schain_record.set_new_schain(False)
mon = get_skaled_monitor(
- skaled_am,
- skaled_checks.get_all(),
- schain_record,
- skaled_status
+ skaled_am, skaled_checks.get_all(), schain_record, skaled_status, ncli_status
)
assert mon == BackupSkaledMonitor
-def test_get_skaled_monitor_repair(skaled_am, skaled_checks, skaled_status, schain_db):
+def test_get_skaled_monitor_repair(skaled_am, skaled_checks, skaled_status, schain_db, ncli_status):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
- schain_record.set_repair_mode(True)
+ schain_record.set_repair_date(datetime.datetime.utcfromtimestamp(CURRENT_TS - 10))
mon = get_skaled_monitor(
- skaled_am,
- skaled_checks.get_all(),
- schain_record,
- skaled_status
+ skaled_am, skaled_checks.get_all(), schain_record, skaled_status, ncli_status
)
assert mon == RepairSkaledMonitor
def test_get_skaled_monitor_repair_skaled_status(
- skaled_am,
- skaled_checks,
- schain_db,
- skaled_status_repair
+ skaled_am, skaled_checks, schain_db, skaled_status_repair, ncli_status
):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
mon = get_skaled_monitor(
- skaled_am,
- skaled_checks.get_all(),
- schain_record,
- skaled_status_repair
+ skaled_am, skaled_checks.get_all(), schain_record, skaled_status_repair, ncli_status
)
assert mon == RepairSkaledMonitor
@@ -249,7 +218,8 @@ def test_get_skaled_monitor_repair_skaled_status(
skaled_checks.get_all(),
schain_record,
skaled_status_repair,
- automatic_repair=False
+ ncli_status,
+ automatic_repair=False,
)
assert mon == RegularSkaledMonitor
@@ -277,19 +247,14 @@ def container(self) -> CheckRes:
@pytest.fixture
-def skaled_checks_new_config(
- schain_db,
- skale,
- rule_controller,
- dutils
-):
+def skaled_checks_new_config(schain_db, skale, rule_controller, dutils):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
return SkaledChecksWithConfig(
schain_name=name,
schain_record=schain_record,
rule_controller=rule_controller,
- dutils=dutils
+ dutils=dutils,
)
@@ -308,6 +273,7 @@ def test_get_skaled_monitor_reload_group(
secret_keys,
ssl_folder,
skaled_checks,
+ ncli_status,
dutils
):
name = schain_db
@@ -319,8 +285,7 @@ def test_get_skaled_monitor_reload_group(
schain = skale.schains.get_by_name(name)
with mock.patch(
- f'{__name__}.SkaledActionManager.upstream_finish_ts',
- new_callable=mock.PropertyMock
+ f'{__name__}.SkaledActionManager.upstream_finish_ts', new_callable=mock.PropertyMock
) as finish_ts_mock:
finish_ts_mock.return_value = CURRENT_TIMESTAMP - 10
skaled_am = SkaledActionManager(
@@ -328,14 +293,10 @@ def test_get_skaled_monitor_reload_group(
rule_controller=rule_controller,
node_config=node_config,
checks=skaled_checks,
- dutils=dutils
- )
- mon = get_skaled_monitor(
- skaled_am,
- state,
- schain_record,
- skaled_status
+ ncli_status=ncli_status,
+ dutils=dutils,
)
+ mon = get_skaled_monitor(skaled_am, state, schain_record, skaled_status, ncli_status)
assert mon == RegularSkaledMonitor
finish_ts_mock.return_value = CURRENT_TIMESTAMP + 10
skaled_am = SkaledActionManager(
@@ -343,14 +304,10 @@ def test_get_skaled_monitor_reload_group(
rule_controller=rule_controller,
node_config=node_config,
checks=skaled_checks,
- dutils=dutils
- )
- mon = get_skaled_monitor(
- skaled_am,
- state,
- schain_record,
- skaled_status
+ ncli_status=ncli_status,
+ dutils=dutils,
)
+ mon = get_skaled_monitor(skaled_am, state, schain_record, skaled_status, ncli_status)
assert mon == ReloadGroupSkaledMonitor
@@ -369,7 +326,8 @@ def test_get_skaled_monitor_reload_ip(
secret_keys,
ssl_folder,
skaled_checks,
- dutils
+ ncli_status,
+ dutils,
):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
@@ -386,26 +344,17 @@ def test_get_skaled_monitor_reload_ip(
rule_controller=rule_controller,
node_config=node_config,
checks=skaled_checks,
- dutils=dutils
- )
- mon = get_skaled_monitor(
- skaled_am,
- state,
- schain_record,
- skaled_status
+ ncli_status=ncli_status,
+ dutils=dutils,
)
+ mon = get_skaled_monitor(skaled_am, state, schain_record, skaled_status, ncli_status)
assert mon == RegularSkaledMonitor
estate = econfig.read()
estate['reload_ts'] = CURRENT_TIMESTAMP + 10
econfig.write(estate)
- mon = get_skaled_monitor(
- skaled_am,
- state,
- schain_record,
- skaled_status
- )
+ mon = get_skaled_monitor(skaled_am, state, schain_record, skaled_status, ncli_status)
assert mon == ReloadIpSkaledMonitor
@@ -423,7 +372,8 @@ def test_get_skaled_monitor_new_node(
skaled_status,
skaled_checks,
ima_migration_schedule,
- dutils
+ ncli_status,
+ dutils,
):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
@@ -431,23 +381,20 @@ def test_get_skaled_monitor_new_node(
finish_ts = CURRENT_TIMESTAMP + 10
with mock.patch(
- f'{__name__}.SkaledActionManager.finish_ts',
- new_callable=mock.PropertyMock
+ f'{__name__}.SkaledActionManager.finish_ts', new_callable=mock.PropertyMock
) as finish_ts_mock:
skaled_am = SkaledActionManager(
schain=schain,
rule_controller=rule_controller,
node_config=node_config,
+ ncli_status=ncli_status,
checks=skaled_checks,
- dutils=dutils
+ dutils=dutils,
)
finish_ts_mock.return_value = finish_ts
mon = get_skaled_monitor(
- skaled_am,
- skaled_checks.get_all(),
- schain_record,
- skaled_status
+ skaled_am, skaled_checks.get_all(), schain_record, skaled_status, ncli_status
)
assert mon == NewNodeSkaledMonitor
@@ -458,6 +405,7 @@ def test_get_skaled_monitor_update_config(
skaled_checks_new_config,
schain_db,
skaled_status_exit_time_reached,
+ ncli_status,
):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
@@ -465,29 +413,20 @@ def test_get_skaled_monitor_update_config(
status['skaled_container'] = False
mon = get_skaled_monitor(
- skaled_am,
- status,
- schain_record,
- skaled_status_exit_time_reached
+ skaled_am, status, schain_record, skaled_status_exit_time_reached, ncli_status
)
assert mon == UpdateConfigSkaledMonitor
status = skaled_checks_new_config.get_all()
status['skaled_container'] = False
mon = get_skaled_monitor(
- skaled_am,
- status,
- schain_record,
- skaled_status_exit_time_reached
+ skaled_am, status, schain_record, skaled_status_exit_time_reached, ncli_status
)
assert mon == UpdateConfigSkaledMonitor
def test_get_skaled_monitor_recreate(
- skaled_am,
- skaled_checks,
- schain_db,
- skaled_status
+ skaled_am, skaled_checks, schain_db, skaled_status, ncli_status
):
name = schain_db
schain_record = SChainRecord.get_by_name(name)
@@ -501,7 +440,8 @@ def test_get_skaled_monitor_recreate(
skaled_am,
status,
schain_record,
- skaled_status
+ skaled_status,
+ ncli_status
)
assert mon == RegularSkaledMonitor
status['skaled_container'] = True
@@ -509,17 +449,13 @@ def test_get_skaled_monitor_recreate(
skaled_am,
status,
schain_record,
- skaled_status
+ skaled_status,
+ ncli_status
)
assert mon == RecreateSkaledMonitor
-def test_regular_skaled_monitor(
- skaled_am,
- skaled_checks,
- clean_docker,
- dutils
-):
+def test_regular_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
mon = RegularSkaledMonitor(skaled_am, skaled_checks)
mon.run()
assert skaled_am.rc.is_rules_synced
@@ -533,8 +469,7 @@ def test_backup_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
mon.run()
assert skaled_am.rc.is_rules_synced
assert dutils.get_vol(skaled_am.name)
- schain_container = dutils.safe_get_container(
- f'skale_schain_{skaled_am.name}')
+ schain_container = dutils.safe_get_container(f'skale_schain_{skaled_am.name}')
assert schain_container
assert '--download-snapshot' in dutils.get_cmd(schain_container.id)
assert dutils.safe_get_container(f'skale_ima_{skaled_am.name}')
@@ -549,8 +484,7 @@ def test_repair_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils):
assert dutils.get_vol(skaled_am.name)
assert dutils.get_vol_created_ts(skaled_am.name) > ts_before
- schain_container = dutils.safe_get_container(
- f'skale_schain_{skaled_am.name}')
+ schain_container = dutils.safe_get_container(f'skale_schain_{skaled_am.name}')
assert schain_container
assert '--download-snapshot' in dutils.get_cmd(schain_container.id)
assert dutils.get_container_created_ts(schain_container.id) > ts_before
@@ -561,8 +495,9 @@ def test_group_reload_skaled_monitor(skaled_am, skaled_checks, clean_docker, dut
mon = ReloadGroupSkaledMonitor(skaled_am, skaled_checks)
ts = time.time()
esfm = ExitScheduleFileManager(mon.am.name)
- with mock.patch('core.schains.monitor.action.get_finish_ts_from_latest_upstream',
- return_value=ts):
+ with mock.patch(
+ 'core.schains.monitor.action.get_finish_ts_from_latest_upstream', return_value=ts
+ ):
mon.run()
assert esfm.exit_ts == ts
assert skaled_am.rc.is_rules_synced
@@ -574,8 +509,9 @@ def test_group_reload_skaled_monitor(skaled_am, skaled_checks, clean_docker, dut
@pytest.mark.skip
def test_group_reload_skaled_monitor_failed_skaled(skaled_am, skaled_checks, clean_docker, dutils):
mon = ReloadGroupSkaledMonitor(skaled_am, skaled_checks)
- with mock.patch('core.schains.monitor.containers.run_schain_container') \
- as run_skaled_container_mock:
+ with mock.patch(
+ 'core.schains.monitor.containers.run_schain_container'
+ ) as run_skaled_container_mock:
mon.run()
assert skaled_am.rc.is_rules_synced
assert run_skaled_container_mock.assert_not_called()
@@ -586,19 +522,13 @@ def test_recreate_skaled_monitor(skaled_am, skaled_checks, clean_docker, dutils)
ts_before = time.time()
time.sleep(1)
mon.run()
- schain_container = dutils.safe_get_container(
- f'skale_schain_{skaled_am.name}')
+ schain_container = dutils.safe_get_container(f'skale_schain_{skaled_am.name}')
assert schain_container
assert dutils.get_container_created_ts(schain_container.id) > ts_before
def test_update_config_skaled_monitor(
- skaled_am,
- skaled_checks,
- dutils,
- clean_docker,
- upstreams,
- skaled_status_exit_time_reached
+ skaled_am, skaled_checks, dutils, clean_docker, upstreams, skaled_status_exit_time_reached
):
name = skaled_checks.name
ts_before = time.time()
@@ -607,13 +537,10 @@ def test_update_config_skaled_monitor(
mon.run()
assert dutils.get_vol(name)
assert dutils.get_vol_created_ts(name) > ts_before
- schain_container = dutils.safe_get_container(
- f'skale_schain_{name}'
- )
+ schain_container = dutils.safe_get_container(f'skale_schain_{name}')
assert schain_container
assert dutils.get_container_created_ts(schain_container.id) > ts_before
- os.stat(os.path.join(schain_config_dir(name),
- f'schain_{name}.json')).st_mtime > ts_before
+ os.stat(os.path.join(schain_config_dir(name), f'schain_{name}.json')).st_mtime > ts_before
def test_no_config_monitor(skaled_am, skaled_checks, clean_docker, dutils):
@@ -629,7 +556,6 @@ def test_new_node_monitor(skaled_am, skaled_checks, clean_docker, dutils):
mon.run()
assert skaled_am.rc.is_rules_synced
assert dutils.get_vol(skaled_am.name)
- schain_container = dutils.safe_get_container(
- f'skale_schain_{skaled_am.name}')
+ schain_container = dutils.safe_get_container(f'skale_schain_{skaled_am.name}')
assert schain_container
assert '--download-snapshot' in dutils.get_cmd(schain_container.id)
diff --git a/tests/schains/process_manager_test.py b/tests/schains/process_manager_test.py
new file mode 100644
index 000000000..ab215cc68
--- /dev/null
+++ b/tests/schains/process_manager_test.py
@@ -0,0 +1,108 @@
+import mock
+import logging
+import os
+import pathlib
+import shutil
+import time
+
+import psutil
+import pytest
+
+from core.schains.process import ProcessReport, terminate_process
+from core.schains.process_manager import run_pm_schain
+from tools.configs.schains import SCHAINS_DIR_PATH
+from tests.utils import get_schain_struct
+
+logger = logging.getLogger(__name__)
+
+MAX_ITERATIONS = 100
+
+
+@pytest.fixture
+def tmp_dir(_schain_name):
+ path = os.path.join(SCHAINS_DIR_PATH, _schain_name)
+ pathlib.Path(path).mkdir()
+ try:
+ yield path
+ finally:
+ shutil.rmtree(path, ignore_errors=True)
+
+
+def target_regular_mock(*args, **kwargs):
+ schain_name = args[1].name
+ process_report = ProcessReport(schain_name)
+ process_report.update(os.getpid(), int(time.time()))
+ logger.info('Starting regular test task runner')
+ iterations = 5
+ for i in range(iterations):
+ process_report.ts = int(time.time())
+ logger.info('Regular test task runner beat %s', i)
+ time.sleep(1)
+
+
+def target_stuck_mock(*args, **kwargs):
+    schain_name = args[1].name
+    ProcessReport(schain_name).update(os.getpid(), int(time.time()))
+ logger.info('Starting stucked test task runner')
+ iterations = 10000
+ for i in range(iterations):
+ logger.info('Stuck test task runner beat %s', i)
+ time.sleep(1)
+
+
+def wait_for_process_report(process_report):
+ wait_it = 0
+ while wait_it < MAX_ITERATIONS and not process_report.is_exist():
+ time.sleep(0.5)
+ wait_it += 1
+ assert process_report.is_exist()
+
+
+def test_run_pm_schain(tmp_dir, skale, skale_ima, node_config, _schain_name):
+ schain = get_schain_struct(schain_name=_schain_name)
+
+ timeout = 7
+
+ with mock.patch('core.schains.process_manager.start_tasks', target_regular_mock):
+ run_pm_schain(skale, skale_ima, node_config, schain, timeout=timeout)
+
+ process_report = ProcessReport(schain.name)
+ wait_for_process_report(process_report)
+
+ pid = process_report.pid
+
+ try:
+ assert psutil.Process(pid).is_running()
+ start_ts = int(time.time())
+
+ while int(time.time()) - start_ts < 2 * timeout:
+ time.sleep(1)
+ assert psutil.Process(pid).status() not in ('dead', 'stopped')
+ finally:
+ pid = ProcessReport(_schain_name).pid
+ terminate_process(pid)
+
+ old_pid = pid
+ wait_it = 0
+ while wait_it < MAX_ITERATIONS and process_report.pid == old_pid:
+ time.sleep(0.5)
+ wait_it += 1
+
+ with mock.patch('core.schains.process_manager.start_tasks', target_stuck_mock):
+ run_pm_schain(skale, skale_ima, node_config, schain, timeout=timeout)
+
+ start_ts = int(time.time())
+
+ while int(time.time()) - start_ts < 2 * timeout:
+ try:
+ psutil.Process(pid).is_running()
+ except psutil.NoSuchProcess:
+ break
+ time.sleep(1)
+
+ try:
+ with pytest.raises(psutil.NoSuchProcess):
+ psutil.Process(pid).is_running()
+ finally:
+ pid = ProcessReport(_schain_name).pid
+ terminate_process(pid)
diff --git a/tests/schains/skaled_status_test.py b/tests/schains/skaled_status_test.py
index 4981698ef..b11422fe1 100644
--- a/tests/schains/skaled_status_test.py
+++ b/tests/schains/skaled_status_test.py
@@ -1,6 +1,15 @@
-from core.schains.skaled_status import SkaledStatus
+from core.schains.status import (
+ get_node_cli_status,
+ node_cli_status_filepath,
+ NodeCliStatus,
+ SkaledStatus,
+)
from core.schains.config.directory import skaled_status_filepath
+CURRENT_TS = 1594903080
+
+NCLI_STATUS_DICT = {'repair_ts': CURRENT_TS, 'snapshot_from': '127.0.0.1'}
+
def test_skaled_status(skaled_status, _schain_name):
status_filepath = skaled_status_filepath(_schain_name)
@@ -9,14 +18,14 @@ def test_skaled_status(skaled_status, _schain_name):
assert skaled_status.subsystem_running == {
'SnapshotDownloader': False,
'Blockchain': False,
- 'Rpc': False
+ 'Rpc': False,
}
assert skaled_status.exit_state == {
'ClearDataDir': False,
'StartAgain': False,
'StartFromSnapshot': False,
- 'ExitTimeReached': False
+ 'ExitTimeReached': False,
}
@@ -47,3 +56,21 @@ def test_log(skaled_status, _schain_name, caplog):
status_filepath = skaled_status_filepath(_schain_name)
skaled_status = SkaledStatus(filepath=status_filepath)
skaled_status.log()
+
+
+def test_node_cli_status_empty(_schain_name):
+ cli_status = get_node_cli_status(_schain_name)
+ assert cli_status is None
+
+ status_filepath = node_cli_status_filepath(_schain_name)
+ cli_status = NodeCliStatus(filepath=status_filepath)
+
+ assert cli_status.repair_ts is None
+ assert cli_status.snapshot_from is None
+
+
+def test_node_cli_status_repair(_schain_name, ncli_status):
+ cli_status = get_node_cli_status(_schain_name)
+
+ assert cli_status.repair_ts == CURRENT_TS
+ assert cli_status.snapshot_from == '127.0.0.1'
diff --git a/tests/schains/task_test.py b/tests/schains/task_test.py
deleted file mode 100644
index f5c574094..000000000
--- a/tests/schains/task_test.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import functools
-import time
-
-import pytest
-
-from core.schains.task import run_tasks, Task
-
-ITERATIONS = 10
-SCHAINS_NUM = 10
-
-
-class StopActionError(Exception):
- pass
-
-
-def action(name):
- for i in range(ITERATIONS):
- time.sleep(2)
- raise StopActionError(f'Stopping {name}')
-
-
-@pytest.mark.skip
-def test_tasks():
- tasks = [
- Task(
- f'test-schain-{i}',
- functools.partial(action, name=f'test-schain-{i}'),
- i
- )
- for i in range(SCHAINS_NUM)
- ]
- run_tasks(tasks=tasks)
- time.sleep(3)
diff --git a/tests/skale-data/config/containers.json b/tests/skale-data/config/containers.json
index 3561d2539..41f3f514e 100644
--- a/tests/skale-data/config/containers.json
+++ b/tests/skale-data/config/containers.json
@@ -1,7 +1,7 @@
{
"schain": {
"name": "skalenetwork/schain",
- "version": "3.16.1",
+ "version": "3.19.0",
"custom_args": {
"ulimits_list": [
{
@@ -31,8 +31,8 @@
},
"ima": {
"name": "skalenetwork/ima",
- "version": "2.0.0-develop.3",
- "new_version": "2.0.0-beta.9",
+ "version": "2.1.0-beta.3",
+ "new_version": "2.1.0",
"custom_args": {},
"args": {
"restart_policy": {
diff --git a/tests/skale-data/config/static_params.yaml b/tests/skale-data/config/static_params.yaml
index 03fafefff..7cfed05fe 100644
--- a/tests/skale-data/config/static_params.yaml
+++ b/tests/skale-data/config/static_params.yaml
@@ -7,10 +7,31 @@ common:
skaled: 0.8
ima: 0.2
volume_limits:
- max_consensus_storage_bytes: 0.3
- max_skaled_leveldb_storage_bytes: 0.3
- max_file_storage_bytes: 0.3
- max_reserved_storage_bytes: 0.1
+ default:
+ max_consensus_storage_bytes: 0.3
+ max_skaled_leveldb_storage_bytes: 0.3
+ max_file_storage_bytes: 0.3
+ max_reserved_storage_bytes: 0.1
+ no_filestorage:
+ max_consensus_storage_bytes: 0.45
+ max_skaled_leveldb_storage_bytes: 0.45
+ max_file_storage_bytes: 0.0
+ max_reserved_storage_bytes: 0.1
+ max_contract_storage:
+ max_consensus_storage_bytes: 0.135
+ max_skaled_leveldb_storage_bytes: 0.765
+ max_file_storage_bytes: 0.0
+ max_reserved_storage_bytes: 0.1
+ max_filestorage:
+ max_consensus_storage_bytes: 0.135
+ max_skaled_leveldb_storage_bytes: 0.135
+ max_file_storage_bytes: 0.63
+ max_reserved_storage_bytes: 0.1
+ max_consensus_db:
+ max_consensus_storage_bytes: 0.72
+ max_skaled_leveldb_storage_bytes: 0.18
+ max_file_storage_bytes: 0.0
+ max_reserved_storage_bytes: 0.1
leveldb_limits:
contract_storage: 0.6
db_storage: 0.2 # leveldb may use x2 storage, so 0.4 divided by 2, actually using 0.4
@@ -37,18 +58,37 @@ envs:
docker-compose: 1.27.4
schain:
- contractStorageZeroValuePatchTimestamp:
- default: 800000
- test-schain: 1500000
- revertableFSPatchTimestamp: 1000000
- contractStoragePatchTimestamp: 1000000
- snapshotIntervalSec: 0
+ contractStorageZeroValuePatchTimestamp: 1681128000
+ revertableFSPatchTimestamp: 1681473600
+ contractStoragePatchTimestamp: 1681732800
+ verifyDaSigsPatchTimestamp: 1681300800
+ storageDestructionPatchTimestamp: 1703851200
+ powCheckPatchTimestamp: 1703592000
+ skipInvalidTransactionsPatchTimestamp: 1703764800
+ pushZeroPatchTimestamp: 1712142000
+ precompiledConfigPatchTimestamp: 1712314800
+ correctForkInPowPatchTimestamp: 1711969200
+ EIP1559TransactionsPatchTimestamp: 1722942000
+ fastConsensusPatchTimestamp: 1723114800
+ flexibleDeploymentPatchTimestamp:
+ default: 0
+ honorable-steel-rasalhague: 1723460400
+ elated-tan-skat: 1723460400
+ green-giddy-denebola: 1723460400
+ parallel-stormy-spica: 1723460400
+ verifyBlsSyncPatchTimestamp: 1722855600
+ snapshotIntervalSec: 86400
emptyBlockIntervalMs: 10000
snapshotDownloadTimeout: 18000
snapshotDownloadInactiveTimeout: 120
+ ima:
+ time_frame:
+ before: 1800
+ after: 900
+
schain_cmd:
- ["-v 3", "--web3-trace", "--enable-debug-behavior-apis", "--aa no"]
+ ["-v 2", "--aa no"]
node:
common:
@@ -63,35 +103,45 @@ envs:
collectionQueueSize: 2
collectionDuration: 10
transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
maxOpenLeveldbFiles: 25
medium:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
large:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
test:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
test4:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
testnet:
server:
@@ -114,18 +164,32 @@ envs:
docker-compose: 1.27.4
schain:
- contractStorageZeroValuePatchTimestamp:
- default: 800000
- test-schain: 1500000
- revertableFSPatchTimestamp: 1000000
- contractStoragePatchTimestamp: 1000000
- snapshotIntervalSec: 0
+ contractStorageZeroValuePatchTimestamp: 1678100400
+ revertableFSPatchTimestamp: 1678100400
+ contractStoragePatchTimestamp: 1678100400
+ verifyDaSigsPatchTimestamp: 1678100400
+ storageDestructionPatchTimestamp: 1702393200
+ powCheckPatchTimestamp: 1702296000
+ skipInvalidTransactionsPatchTimestamp: 1702382400
+ pushZeroPatchTimestamp: 1710331200
+ precompiledConfigPatchTimestamp: 1710331200
+ correctForkInPowPatchTimestamp: 1710331200
+ EIP1559TransactionsPatchTimestamp: 1721818800
+ fastConsensusPatchTimestamp: 1721822400
+ flexibleDeploymentPatchTimestamp: 1721826000
+ verifyBlsSyncPatchTimestamp: 1721829600
+ snapshotIntervalSec: 86400
emptyBlockIntervalMs: 10000
snapshotDownloadTimeout: 18000
snapshotDownloadInactiveTimeout: 120
+ ima:
+ time_frame:
+ before: 1800
+ after: 900
+
schain_cmd:
- ["-v 3", "--web3-trace", "--enable-debug-behavior-apis", "--aa no"]
+ ["-v 2", "--aa no"]
node:
common:
@@ -140,35 +204,45 @@ envs:
collectionQueueSize: 2
collectionDuration: 10
transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
maxOpenLeveldbFiles: 25
medium:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
large:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
test:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
test4:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
qanet:
server:
@@ -191,64 +265,87 @@ envs:
docker-compose: 1.27.4
schain:
- contractStorageZeroValuePatchTimestamp:
- default: 800000
- test-schain: 1500000
- revertableFSPatchTimestamp: 1000000
- contractStoragePatchTimestamp: 1000000
- snapshotIntervalSec: 0
+ contractStorageZeroValuePatchTimestamp: 1691146800
+ revertableFSPatchTimestamp: 1691146800
+ contractStoragePatchTimestamp: 1691146800
+ verifyDaSigsPatchTimestamp: 1691146800
+ storageDestructionPatchTimestamp: 1699618500
+ powCheckPatchTimestamp: 1699625700
+ skipInvalidTransactionsPatchTimestamp: 1699632900
+ pushZeroPatchTimestamp: 1712142000
+ precompiledConfigPatchTimestamp: 1712314800
+ correctForkInPowPatchTimestamp: 1711969200
+ EIP1559TransactionsPatchTimestamp: 0
+ fastConsensusPatchTimestamp: 0
+ flexibleDeploymentPatchTimestamp: 0
+ verifyBlsSyncPatchTimestamp: 0
+ snapshotIntervalSec: 3600
emptyBlockIntervalMs: 10000
snapshotDownloadTimeout: 18000
snapshotDownloadInactiveTimeout: 120
+ ima:
+ time_frame:
+ before: 1800
+ after: 900
+
schain_cmd:
- ["-v 3", "--web3-trace", "--enable-debug-behavior-apis", "--aa no"]
+ ["-v 2", "--aa no"]
node:
- admin:
- automatic_repair: false
common:
bindIP: "0.0.0.0"
logLevel: "info"
logLevelConfig: "info"
pg-threads: 10
pg-threads-limit: 10
+ admin:
+ automatic_repair: false
small:
minCacheSize: 1000000
maxCacheSize: 2000000
collectionQueueSize: 2
collectionDuration: 10
transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
maxOpenLeveldbFiles: 25
medium:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
large:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
test:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
test4:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
-
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
devnet:
server:
@@ -271,12 +368,23 @@ envs:
docker-compose: 1.27.4
schain:
- contractStorageZeroValuePatchTimestamp:
- default: 800000
- test-schain: 1500000
+ contractStorageZeroValuePatchTimestamp: 1000000
revertableFSPatchTimestamp: 1000000
contractStoragePatchTimestamp: 1000000
- snapshotIntervalSec: 0
+ verifyDaSigsPatchTimestamp: 1000000
+ storageDestructionPatchTimestamp: 1000000
+ powCheckPatchTimestamp: 1000000
+ skipInvalidTransactionsPatchTimestamp: 1000000
+ pushZeroPatchTimestamp: 1712142000
+ precompiledConfigPatchTimestamp: 1712314800
+ correctForkInPowPatchTimestamp: 1711969200
+ EIP1559TransactionsPatchTimestamp: 0
+ fastConsensusPatchTimestamp: 0
+ flexibleDeploymentPatchTimestamp:
+ default: 0
+ test-schain: 1723460400
+ verifyBlsSyncPatchTimestamp: 0
+ snapshotIntervalSec: 3600
emptyBlockIntervalMs: 10000
snapshotDownloadTimeout: 18000
snapshotDownloadInactiveTimeout: 120
@@ -290,8 +398,6 @@ envs:
["-v 3", "--web3-trace", "--enable-debug-behavior-apis", "--aa no"]
node:
- admin:
- automatic_repair: true
common:
bindIP: "0.0.0.0"
logLevel: "info"
@@ -304,32 +410,42 @@ envs:
collectionQueueSize: 2
collectionDuration: 10
transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
maxOpenLeveldbFiles: 25
medium:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
large:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
test:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
test4:
minCacheSize: 8000000
maxCacheSize: 16000000
collectionQueueSize: 20
collectionDuration: 60
- transactionQueueSize: 100000
- maxOpenLeveldbFiles: 256
+ transactionQueueSize: 1000
+ transactionQueueLimitBytes: 69206016
+ futureTransactionQueueLimitBytes: 140509184
+ maxOpenLeveldbFiles: 1000
diff --git a/tests/skale-data/node_data/resource_allocation.json b/tests/skale-data/node_data/resource_allocation.json
index b0733174c..49d8764aa 100644
--- a/tests/skale-data/node_data/resource_allocation.json
+++ b/tests/skale-data/node_data/resource_allocation.json
@@ -1,110 +1,315 @@
{
"schain": {
"cpu_shares": {
- "test4": 22,
- "test": 22,
- "small": 5,
- "medium": 22,
- "large": 716,
- "sync_node": 716
+ "test4": 102,
+ "test": 102,
+ "small": 6,
+ "medium": 102,
+ "large": 819
},
"mem": {
- "test4": 300647710,
- "test": 300647710,
- "small": 75161927,
- "medium": 300647710,
- "large": 9620726743,
- "sync_node": 9620726743
+ "test4": 1325679575,
+ "test": 1325679575,
+ "small": 82854973,
+ "medium": 1325679575,
+ "large": 10605436600
},
"disk": {
- "sync_node": 75999936512,
- "large": 75999936512,
- "medium": 2374998016,
- "small": 593749504,
- "test": 2374998016,
- "test4": 2374998016
+ "large": 1687199940608,
+ "medium": 210899992576,
+ "small": 13181249536,
+ "test": 210899992576,
+ "test4": 210899992576
},
"volume_limits": {
- "sync_node": {
- "max_consensus_storage_bytes": 22799980953,
- "max_file_storage_bytes": 22799980953,
- "max_reserved_storage_bytes": 7599993651,
- "max_skaled_leveldb_storage_bytes": 22799980953
- },
"large": {
- "max_consensus_storage_bytes": 22799980953,
- "max_file_storage_bytes": 22799980953,
- "max_reserved_storage_bytes": 7599993651,
- "max_skaled_leveldb_storage_bytes": 22799980953
+ "default": {
+ "max_consensus_storage_bytes": 506159982182,
+ "max_file_storage_bytes": 506159982182,
+ "max_reserved_storage_bytes": 168719994060,
+ "max_skaled_leveldb_storage_bytes": 506159982182
+ },
+ "max_consensus_db": {
+ "max_consensus_storage_bytes": 1214783957237,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 168719994060,
+ "max_skaled_leveldb_storage_bytes": 303695989309
+ },
+ "max_contract_storage": {
+ "max_consensus_storage_bytes": 227771991982,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 168719994060,
+ "max_skaled_leveldb_storage_bytes": 1290707954565
+ },
+ "max_filestorage": {
+ "max_consensus_storage_bytes": 227771991982,
+ "max_file_storage_bytes": 1062935962583,
+ "max_reserved_storage_bytes": 168719994060,
+ "max_skaled_leveldb_storage_bytes": 227771991982
+ },
+ "no_filestorage": {
+ "max_consensus_storage_bytes": 759239973273,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 168719994060,
+ "max_skaled_leveldb_storage_bytes": 759239973273
+ }
},
"medium": {
- "max_consensus_storage_bytes": 712499404,
- "max_file_storage_bytes": 712499404,
- "max_reserved_storage_bytes": 237499801,
- "max_skaled_leveldb_storage_bytes": 712499404
+ "default": {
+ "max_consensus_storage_bytes": 63269997772,
+ "max_file_storage_bytes": 63269997772,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 63269997772
+ },
+ "max_consensus_db": {
+ "max_consensus_storage_bytes": 151847994654,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 37961998663
+ },
+ "max_contract_storage": {
+ "max_consensus_storage_bytes": 28471498997,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 161338494320
+ },
+ "max_filestorage": {
+ "max_consensus_storage_bytes": 28471498997,
+ "max_file_storage_bytes": 132866995322,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 28471498997
+ },
+ "no_filestorage": {
+ "max_consensus_storage_bytes": 94904996659,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 94904996659
+ }
},
"small": {
- "max_consensus_storage_bytes": 178124851,
- "max_file_storage_bytes": 178124851,
- "max_reserved_storage_bytes": 59374950,
- "max_skaled_leveldb_storage_bytes": 178124851
+ "default": {
+ "max_consensus_storage_bytes": 3954374860,
+ "max_file_storage_bytes": 3954374860,
+ "max_reserved_storage_bytes": 1318124953,
+ "max_skaled_leveldb_storage_bytes": 3954374860
+ },
+ "max_consensus_db": {
+ "max_consensus_storage_bytes": 9490499665,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 1318124953,
+ "max_skaled_leveldb_storage_bytes": 2372624916
+ },
+ "max_contract_storage": {
+ "max_consensus_storage_bytes": 1779468687,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 1318124953,
+ "max_skaled_leveldb_storage_bytes": 10083655895
+ },
+ "max_filestorage": {
+ "max_consensus_storage_bytes": 1779468687,
+ "max_file_storage_bytes": 8304187207,
+ "max_reserved_storage_bytes": 1318124953,
+ "max_skaled_leveldb_storage_bytes": 1779468687
+ },
+ "no_filestorage": {
+ "max_consensus_storage_bytes": 5931562291,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 1318124953,
+ "max_skaled_leveldb_storage_bytes": 5931562291
+ }
},
"test": {
- "max_consensus_storage_bytes": 712499404,
- "max_file_storage_bytes": 712499404,
- "max_reserved_storage_bytes": 237499801,
- "max_skaled_leveldb_storage_bytes": 712499404
+ "default": {
+ "max_consensus_storage_bytes": 63269997772,
+ "max_file_storage_bytes": 63269997772,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 63269997772
+ },
+ "max_consensus_db": {
+ "max_consensus_storage_bytes": 151847994654,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 37961998663
+ },
+ "max_contract_storage": {
+ "max_consensus_storage_bytes": 28471498997,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 161338494320
+ },
+ "max_filestorage": {
+ "max_consensus_storage_bytes": 28471498997,
+ "max_file_storage_bytes": 132866995322,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 28471498997
+ },
+ "no_filestorage": {
+ "max_consensus_storage_bytes": 94904996659,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 94904996659
+ }
},
"test4": {
- "max_consensus_storage_bytes": 712499404,
- "max_file_storage_bytes": 712499404,
- "max_reserved_storage_bytes": 237499801,
- "max_skaled_leveldb_storage_bytes": 712499404
+ "default": {
+ "max_consensus_storage_bytes": 63269997772,
+ "max_file_storage_bytes": 63269997772,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 63269997772
+ },
+ "max_consensus_db": {
+ "max_consensus_storage_bytes": 151847994654,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 37961998663
+ },
+ "max_contract_storage": {
+ "max_consensus_storage_bytes": 28471498997,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 161338494320
+ },
+ "max_filestorage": {
+ "max_consensus_storage_bytes": 28471498997,
+ "max_file_storage_bytes": 132866995322,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 28471498997
+ },
+ "no_filestorage": {
+ "max_consensus_storage_bytes": 94904996659,
+ "max_file_storage_bytes": 0,
+ "max_reserved_storage_bytes": 21089999257,
+ "max_skaled_leveldb_storage_bytes": 94904996659
+ }
}
},
"leveldb_limits": {
- "sync_node": {
- "contract_storage": 13679988571,
- "db_storage": 9119992381
- },
"large": {
- "contract_storage": 13679988571,
- "db_storage": 9119992381
+ "default": {
+ "contract_storage": 303695989309,
+ "db_storage": 101231996436
+ },
+ "max_consensus_db": {
+ "contract_storage": 182217593585,
+ "db_storage": 60739197861
+ },
+ "max_contract_storage": {
+ "contract_storage": 774424772739,
+ "db_storage": 258141590913
+ },
+ "max_filestorage": {
+ "contract_storage": 136663195189,
+ "db_storage": 45554398396
+ },
+ "no_filestorage": {
+ "contract_storage": 455543983963,
+ "db_storage": 151847994654
+ }
},
"medium": {
- "contract_storage": 427499642,
- "db_storage": 284999761
+ "default": {
+ "contract_storage": 37961998663,
+ "db_storage": 12653999554
+ },
+ "max_consensus_db": {
+ "contract_storage": 22777199197,
+ "db_storage": 7592399732
+ },
+ "max_contract_storage": {
+ "contract_storage": 96803096592,
+ "db_storage": 32267698864
+ },
+ "max_filestorage": {
+ "contract_storage": 17082899398,
+ "db_storage": 5694299799
+ },
+ "no_filestorage": {
+ "contract_storage": 56942997995,
+ "db_storage": 18980999331
+ }
},
"small": {
- "contract_storage": 106874910,
- "db_storage": 71249940
+ "default": {
+ "contract_storage": 2372624916,
+ "db_storage": 790874972
+ },
+ "max_consensus_db": {
+ "contract_storage": 1423574949,
+ "db_storage": 474524983
+ },
+ "max_contract_storage": {
+ "contract_storage": 6050193537,
+ "db_storage": 2016731179
+ },
+ "max_filestorage": {
+ "contract_storage": 1067681212,
+ "db_storage": 355893737
+ },
+ "no_filestorage": {
+ "contract_storage": 3558937374,
+ "db_storage": 1186312458
+ }
},
"test": {
- "contract_storage": 427499642,
- "db_storage": 284999761
+ "default": {
+ "contract_storage": 37961998663,
+ "db_storage": 12653999554
+ },
+ "max_consensus_db": {
+ "contract_storage": 22777199197,
+ "db_storage": 7592399732
+ },
+ "max_contract_storage": {
+ "contract_storage": 96803096592,
+ "db_storage": 32267698864
+ },
+ "max_filestorage": {
+ "contract_storage": 17082899398,
+ "db_storage": 5694299799
+ },
+ "no_filestorage": {
+ "contract_storage": 56942997995,
+ "db_storage": 18980999331
+ }
},
"test4": {
- "contract_storage": 427499642,
- "db_storage": 284999761
+ "default": {
+ "contract_storage": 37961998663,
+ "db_storage": 12653999554
+ },
+ "max_consensus_db": {
+ "contract_storage": 22777199197,
+ "db_storage": 7592399732
+ },
+ "max_contract_storage": {
+ "contract_storage": 96803096592,
+ "db_storage": 32267698864
+ },
+ "max_filestorage": {
+ "contract_storage": 17082899398,
+ "db_storage": 5694299799
+ },
+ "no_filestorage": {
+ "contract_storage": 56942997995,
+ "db_storage": 18980999331
+ }
}
}
},
"ima": {
"cpu_shares": {
- "test4": 9,
- "test": 9,
- "small": 2,
- "medium": 9,
- "large": 307,
- "sync_node": 307
+ "test4": 25,
+ "test": 25,
+ "small": 10,
+ "medium": 25,
+ "large": 204
},
"mem": {
- "test4": 128849018,
- "test": 128849018,
- "small": 32212254,
- "medium": 128849018,
- "large": 4123168604,
- "sync_node": 4123168604
+ "test4": 331419893,
+ "test": 331419893,
+ "small": 20713743,
+ "medium": 331419893,
+ "large": 2651359150
}
}
-}
\ No newline at end of file
+}
diff --git a/tests/test_generate_config_sync.py b/tests/test_generate_config_sync.py
index 00b95ba3f..989fccf99 100644
--- a/tests/test_generate_config_sync.py
+++ b/tests/test_generate_config_sync.py
@@ -1,6 +1,7 @@
import json
import pytest
from skale.schain_config.rotation_history import get_previous_schain_groups
+from skale.dataclasses.schain_options import AllocationType
from core.schains.config.predeployed import generate_predeployed_accounts
from core.schains.config.precompiled import generate_precompiled_accounts
@@ -21,7 +22,7 @@ def test_generate_config(skale):
for schain_name in CHAINS:
schain = skale.schains.get_by_name(schain_name)
- schain_type = get_schain_type(schain['partOfNode'])
+ schain_type = get_schain_type(schain.part_of_node)
node_groups = get_previous_schain_groups(skale, schain_name)
original_group = node_groups[0]['nodes']
@@ -33,10 +34,10 @@ def test_generate_config(skale):
'publicKey': value[2]
})
- is_owner_contract = is_address_contract(skale.web3, schain['mainnetOwner'])
- on_chain_owner = get_on_chain_owner(schain, schain['generation'], is_owner_contract)
+ is_owner_contract = is_address_contract(skale.web3, schain.mainnet_owner)
+ on_chain_owner = get_on_chain_owner(schain, schain.generation, is_owner_contract)
- mainnet_owner = schain['mainnetOwner']
+ mainnet_owner = schain.mainnet_owner
originator_address = get_schain_originator(schain)
@@ -47,13 +48,14 @@ def test_generate_config(skale):
base_config = SChainBaseConfig(BASE_SCHAIN_CONFIG_FILEPATH)
predeployed_accounts = generate_predeployed_accounts(
- schain_name=schain['name'],
+ schain_name=schain.name,
+ allocation_type=AllocationType.DEFAULT,
schain_type=schain_type,
schain_nodes=schain_nodes_with_schains,
on_chain_owner=on_chain_owner,
mainnet_owner=mainnet_owner,
originator_address=originator_address,
- generation=schain['generation']
+ generation=schain.generation
)
accounts = {
diff --git a/tests/utils.py b/tests/utils.py
index 29f37e745..3feb301e6 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -12,6 +12,8 @@
from skale import Skale, SkaleIma
from skale.utils.web3_utils import init_web3
+from skale.contracts.manager.schains import SchainStructure
+from skale.dataclasses.schain_options import AllocationType, SchainOptions
from skale.wallets import Web3Wallet
from web3 import Web3
@@ -37,6 +39,7 @@
from web.models.schain import upsert_schain_record
+CURRENT_TS = 1594903080
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
ENDPOINT = os.getenv('ENDPOINT')
@@ -56,6 +59,9 @@
IMA_MIGRATION_TS = 1688388551
+TEST_ORIGINATOR_ADDRESS = '0x0B5e3eBB74eE281A24DDa3B1A4e70692c15EAC34'
+TEST_MAINNET_OWNER_ADDRESS = '0x30E1C96277735B03E59B3098204fd04FD0e78a46'
+
class FailedAPICall(Exception):
pass
@@ -113,19 +119,23 @@ def post_bp_data(bp, request, params=None, full_response=False, **kwargs):
return json.loads(data.decode('utf-8'))
-def get_schain_contracts_data(schain_name):
- """ Schain data mock in case if schain on contracts is not required """
- return {
- 'name': schain_name,
- 'mainnetOwner': '0x1213123091a230923123213123',
- 'indexInOwnerList': 0,
- 'partOfNode': 0,
- 'lifetime': 3600,
- 'startDate': 1575448438,
- 'deposit': 1000000000000000000,
- 'index': 0,
- 'active': True
- }
+def get_schain_struct(schain_name: str = 'test_chain') -> SchainStructure:
+ return SchainStructure(
+ name=schain_name,
+ part_of_node=0,
+ generation=1,
+ mainnet_owner=TEST_MAINNET_OWNER_ADDRESS,
+ originator=TEST_ORIGINATOR_ADDRESS,
+ options=SchainOptions(True, True, AllocationType.DEFAULT),
+ index_in_owner_list=0,
+ lifetime=3600,
+ start_date=100000000,
+ start_block=1000,
+ deposit=0,
+ index=1,
+ chain_id=1,
+ active=True,
+ )
def run_simple_schain_container(schain_data: dict, dutils: DockerUtils):
@@ -462,3 +472,52 @@ def generate_schain_config(schain_name):
}
}
}
+
+
+STATIC_NODE_GROUPS = {
+ '1': {
+ "rotation": {
+ "leaving_node_id": 3,
+ "new_node_id": 4,
+ },
+ "nodes": {
+ "0": [
+ 0,
+ 159,
+ "0xgd"
+ ],
+ "4": [
+ 4,
+ 31,
+ "0x5d"
+ ],
+ },
+ "finish_ts": None,
+ "bls_public_key": None
+ },
+ '0': {
+ "rotation": {
+ "leaving_node_id": 2,
+ "new_node_id": 3,
+ },
+ "nodes": {
+ "0": [
+ 0,
+ 159,
+ "0xgd"
+ ],
+ "3": [
+ 7,
+ 61,
+ "0xbh"
+ ],
+ },
+ "finish_ts": 1681390775,
+ "bls_public_key": {
+ "blsPublicKey0": "3",
+ "blsPublicKey1": "4",
+ "blsPublicKey2": "7",
+ "blsPublicKey3": "9"
+ }
+ }
+}
diff --git a/tools/configs/__init__.py b/tools/configs/__init__.py
index 35cfbcb10..4794de043 100644
--- a/tools/configs/__init__.py
+++ b/tools/configs/__init__.py
@@ -30,6 +30,7 @@
CONFIG_FOLDER = os.path.join(SKALE_VOLUME_PATH, CONFIG_FOLDER_NAME)
STATIC_ACCOUNTS_FOLDER = os.path.join(CONFIG_FOLDER, 'schain_accounts')
+STATIC_GROUPS_FOLDER = os.path.join(CONFIG_FOLDER, 'node_groups')
FLASK_SECRET_KEY_FILENAME = 'flask_db_key.txt'
FLASK_SECRET_KEY_FILE = os.path.join(NODE_DATA_PATH, FLASK_SECRET_KEY_FILENAME)
diff --git a/tools/configs/logs.py b/tools/configs/logs.py
index d21c8da41..0d9205d81 100644
--- a/tools/configs/logs.py
+++ b/tools/configs/logs.py
@@ -42,10 +42,10 @@
REMOVED_CONTAINERS_FOLDER_NAME
)
-LOG_FILE_SIZE_MB = 40
+LOG_FILE_SIZE_MB = 100
LOG_FILE_SIZE_BYTES = LOG_FILE_SIZE_MB * 1000000
-LOG_BACKUP_COUNT = 10
+LOG_BACKUP_COUNT = 20
ADMIN_LOG_FORMAT = '[%(asctime)s %(levelname)s][%(process)d][%(processName)s][%(threadName)s] - %(name)s:%(lineno)d - %(message)s' # noqa
API_LOG_FORMAT = '[%(asctime)s] %(process)d %(levelname)s %(url)s %(module)s: %(message)s' # noqa
diff --git a/tools/configs/schains.py b/tools/configs/schains.py
index 566709ca8..3341799da 100644
--- a/tools/configs/schains.py
+++ b/tools/configs/schains.py
@@ -44,6 +44,7 @@
MAX_SCHAIN_FAILED_RPC_COUNT = int(os.getenv('MAX_SCHAIN_FAILED_RPC_COUNT', 5))
SKALED_STATUS_FILENAME = 'skaled.status'
+NODE_CLI_STATUS_FILENAME = 'node_cli.status'
STATIC_SCHAIN_DIR_NAME = 'schains'
SCHAIN_STATE_PATH = os.path.join(SKALE_LIB_PATH, 'schains')
@@ -53,3 +54,5 @@
RPC_CHECK_TIMEOUT_STEP = 10
MAX_CONSENSUS_STORAGE_INF_VALUE = 1000000000000000000
+
+DKG_TIMEOUT_COEFFICIENT = 2.2
diff --git a/web/migrations.py b/web/migrations.py
index 3341a49bf..44ce37fef 100644
--- a/web/migrations.py
+++ b/web/migrations.py
@@ -65,6 +65,9 @@ def run_migrations(db, migrator):
add_backup_run_field(db, migrator)
add_sync_config_run_field(db, migrator)
+ # 2.7 -> 2.8 update fields
+ add_repair_date_field(db, migrator)
+
def add_new_schain_field(db, migrator):
add_column(
@@ -157,6 +160,13 @@ def add_dkg_step_field(db, migrator):
)
+def add_repair_date_field(db, migrator):
+ add_column(
+ db, migrator, 'SChainRecord', 'repair_date',
+ DateTimeField(default=datetime.now())
+ )
+
+
def find_column(db, table_name, column_name):
columns = db.get_columns(table_name)
return next((x for x in columns if x.name == column_name), None)
diff --git a/web/models/schain.py b/web/models/schain.py
index a7f67eb79..5f92e45ca 100644
--- a/web/models/schain.py
+++ b/web/models/schain.py
@@ -17,12 +17,13 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
+import functools
import logging
-import threading
+import time
from datetime import datetime
from peewee import (CharField, DateTimeField,
- IntegrityError, IntegerField, BooleanField)
+ IntegrityError, IntegerField, BooleanField, OperationalError)
from core.schains.dkg.structures import DKGStatus
from web.models.base import BaseModel
@@ -30,10 +31,31 @@
logger = logging.getLogger(__name__)
DEFAULT_CONFIG_VERSION = '0.0.0'
+RETRY_ATTEMPTS = 5
+TIMEOUTS = [2 ** p for p in range(RETRY_ATTEMPTS)]
+
+
+def operational_error_retry(func):
+ @functools.wraps(func)
+ def wrapper(cls, *args, **kwargs):
+ result, error = None, None
+ for i, timeout in enumerate(TIMEOUTS):
+ try:
+ result = func(cls, *args, **kwargs)
+ except OperationalError as e:
+ logger.error('DB operational error. Sleeping %d', timeout, exc_info=e)
+ error = e
+ time.sleep(timeout)
+ else:
+ error = None
+ break
+ if error is not None:
+ raise error
+ return result
+ return wrapper
class SChainRecord(BaseModel):
- _lock = threading.Lock()
name = CharField(unique=True)
added_at = DateTimeField()
dkg_status = IntegerField()
@@ -54,6 +76,8 @@ class SChainRecord(BaseModel):
ssl_change_date = DateTimeField(default=datetime.now())
+ repair_date = DateTimeField(default=datetime.now())
+
@classmethod
def add(cls, name):
try:
@@ -70,6 +94,7 @@ def add(cls, name):
return (None, err)
@classmethod
+ @operational_error_retry
def get_by_name(cls, name):
return cls.get(cls.name == name)
@@ -107,10 +132,6 @@ def to_dict(cls, record):
'failed_rpc_count': record.failed_rpc_count
}
- def upload(self, *args, **kwargs) -> None:
- with SChainRecord._lock:
- self.save(*args, **kwargs)
-
def dkg_started(self):
self.set_dkg_status(DKGStatus.IN_PROGRESS)
@@ -126,66 +147,66 @@ def dkg_done(self):
def set_dkg_status(self, val: DKGStatus) -> None:
logger.info(f'Changing DKG status for {self.name} to {val.name}')
self.dkg_status = val
- self.upload()
+ self.save()
def set_deleted(self):
self.is_deleted = True
- self.upload()
+ self.save()
def set_first_run(self, val):
logger.info(f'Changing first_run for {self.name} to {val}')
self.first_run = val
- self.upload(only=[SChainRecord.first_run])
+ self.save(only=[SChainRecord.first_run])
def set_backup_run(self, val):
logger.info(f'Changing backup_run for {self.name} to {val}')
self.backup_run = val
- self.upload(only=[SChainRecord.backup_run])
+ self.save(only=[SChainRecord.backup_run])
def set_repair_mode(self, value):
logger.info(f'Changing repair_mode for {self.name} to {value}')
self.repair_mode = value
- self.upload()
+ self.save()
def set_new_schain(self, value):
logger.info(f'Changing new_schain for {self.name} to {value}')
self.new_schain = value
- self.upload()
+ self.save()
def set_needs_reload(self, value):
logger.info(f'Changing needs_reload for {self.name} to {value}')
self.needs_reload = value
- self.upload()
+ self.save()
def set_monitor_last_seen(self, value):
logger.info(f'Changing monitor_last_seen for {self.name} to {value}')
self.monitor_last_seen = value
- self.upload()
+ self.save()
def set_monitor_id(self, value):
logger.info(f'Changing monitor_id for {self.name} to {value}')
self.monitor_id = value
- self.upload()
+ self.save()
def set_config_version(self, value):
logger.info(f'Changing config_version for {self.name} to {value}')
self.config_version = value
- self.upload()
+ self.save()
def set_restart_count(self, value: int) -> None:
logger.info(f'Changing restart count for {self.name} to {value}')
self.restart_count = value
- self.upload()
+ self.save()
def set_failed_rpc_count(self, value: int) -> None:
logger.info(f'Changing failed rpc count for {self.name} to {value}')
self.failed_rpc_count = value
- self.upload()
+ self.save()
def set_snapshot_from(self, value: str) -> None:
logger.info(f'Changing snapshot from for {self.name} to {value}')
self.snapshot_from = value
- self.upload()
+ self.save()
def reset_failed_counters(self) -> None:
logger.info(f'Resetting failed counters for {self.name}')
@@ -203,7 +224,7 @@ def is_dkg_done(self) -> bool:
def set_sync_config_run(self, value):
logger.info(f'Changing sync_config_run for {self.name} to {value}')
self.sync_config_run = value
- self.upload()
+ self.save()
def is_dkg_unsuccessful(self) -> bool:
return self.dkg_status in [
@@ -211,6 +232,11 @@ def is_dkg_unsuccessful(self) -> bool:
DKGStatus.FAILED
]
+ def set_repair_date(self, value: datetime) -> None:
+ logger.info(f'Changing repair_date for {self.name} to {value}')
+ self.repair_date = value
+ self.save()
+
def create_tables():
logger.info('Creating schainrecord table...')
@@ -302,23 +328,3 @@ def get_schains_names(include_deleted=False):
def get_schains_statuses(include_deleted=False):
return [SChainRecord.to_dict(r)
for r in SChainRecord.get_all_records(include_deleted)]
-
-
-def toggle_schain_repair_mode(name, snapshot_from: str = ''):
- logger.info(f'Toggling repair mode for schain {name}')
- query = SChainRecord.update(
- repair_mode=True,
- snapshot_from=snapshot_from
- ).where(SChainRecord.name == name)
- count = query.execute()
- return count > 0
-
-
-def switch_off_repair_mode(name):
- logger.info(f'Disabling repair mode for schain {name}')
- query = SChainRecord.update(
- repair_mode=False,
- snapshot_from=''
- ).where(SChainRecord.name == name)
- count = query.execute()
- return count > 0
diff --git a/web/routes/health.py b/web/routes/health.py
index 2503d674b..f23367472 100644
--- a/web/routes/health.py
+++ b/web/routes/health.py
@@ -18,8 +18,6 @@
# along with this program. If not, see .
import logging
-import telnetlib
-from enum import Enum
from http import HTTPStatus
@@ -27,7 +25,6 @@
from sgx import SgxClient
-from urllib.parse import urlparse
from core.node import get_check_report, get_skale_node_version
from core.node import get_current_nodes
from core.schains.checks import SChainChecks
@@ -38,7 +35,6 @@
from core.schains.ima import get_ima_log_checks
from core.schains.external_config import ExternalState
from tools.sgx_utils import SGX_CERTIFICATES_FOLDER, SGX_SERVER_URL
-from tools.configs import ZMQ_PORT, ZMQ_TIMEOUT
from web.models.schain import SChainRecord
from web.helper import (
construct_err_response,
@@ -51,11 +47,6 @@
BLUEPRINT_NAME = 'health'
-class SGXStatus(Enum):
- CONNECTED = 0
- NOT_CONNECTED = 1
-
-
health_bp = Blueprint(BLUEPRINT_NAME, __name__)
@@ -94,18 +85,18 @@ def schains_checks():
)
checks = []
for schain in schains:
- if schain.get('name') != '':
- rotation_data = g.skale.node_rotation.get_rotation(schain['name'])
+ if schain.name != '':
+ rotation_data = g.skale.node_rotation.get_rotation(schain.name)
rotation_id = rotation_data['rotation_id']
- if SChainRecord.added(schain['name']):
+ if SChainRecord.added(schain.name):
rc = get_default_rule_controller(
- name=schain['name'],
+ name=schain.name,
sync_agent_ranges=sync_agent_ranges
)
- current_nodes = get_current_nodes(g.skale, schain['name'])
- schain_record = SChainRecord.get_by_name(schain['name'])
+ current_nodes = get_current_nodes(g.skale, schain.name)
+ schain_record = SChainRecord.get_by_name(schain.name)
schain_checks = SChainChecks(
- schain['name'],
+ schain.name,
node_id,
schain_record=schain_record,
rule_controller=rc,
@@ -117,7 +108,7 @@ def schains_checks():
sync_node=False
).get_all(needed=checks_filter)
checks.append({
- 'name': schain['name'],
+ 'name': schain.name,
'healthchecks': schain_checks
})
return construct_ok_response(checks)
@@ -137,27 +128,28 @@ def ima_log_checks():
@health_bp.route(get_api_url(BLUEPRINT_NAME, 'sgx'), methods=['GET'])
def sgx_info():
logger.debug(request)
- sgx = SgxClient(SGX_SERVER_URL, SGX_CERTIFICATES_FOLDER)
+ status_zmq = False
+ status_https = False
+ version = None
+ sgx = SgxClient(SGX_SERVER_URL, SGX_CERTIFICATES_FOLDER, zmq=True)
try:
- status = sgx.get_server_status()
- version = sgx.get_server_version()
- except Exception as e: # todo: catch specific error - edit sgx.py
- logger.info(e)
- status = 1
- version = None
- sgx_host = urlparse(SGX_SERVER_URL).hostname
- tn = telnetlib.Telnet()
- zmq_status = 0
+ if sgx.zmq.get_server_status() == 0:
+ status_zmq = True
+ version = sgx.zmq.get_server_version()
+ except Exception as err:
+ logger.error(f'Cannot make SGX ZMQ check {err}')
+ sgx_https = SgxClient(SGX_SERVER_URL, SGX_CERTIFICATES_FOLDER)
try:
- tn.open(sgx_host, ZMQ_PORT, timeout=ZMQ_TIMEOUT)
+ if sgx_https.get_server_status() == 0:
+ status_https = True
+ if version is None:
+ version = sgx_https.get_server_version()
except Exception as err:
- zmq_status = 1
- logger.error(err)
- else:
- tn.close()
+ logger.error(f'Cannot make SGX HTTPS check {err}')
+
res = {
- 'status': zmq_status,
- 'status_name': SGXStatus(status).name,
+ 'status_zmq': status_zmq,
+ 'status_https': status_https,
'sgx_server_url': SGX_SERVER_URL,
'sgx_keyname': g.config.sgx_key_name,
'sgx_wallet_version': version
diff --git a/web/routes/node.py b/web/routes/node.py
index 373603383..0d2a5e88a 100644
--- a/web/routes/node.py
+++ b/web/routes/node.py
@@ -29,7 +29,7 @@
from core.node import get_meta_info, get_node_hardware_info, get_btrfs_info, get_abi_hash
from core.node import check_validator_nodes
-
+from core.updates import update_unsafe_for_schains
from tools.configs.web3 import ABI_FILEPATH, ENDPOINT, UNTRUSTED_PROVIDERS
from tools.configs.ima import MAINNET_IMA_ABI_FILEPATH
@@ -266,3 +266,12 @@ def ima_abi():
logger.debug(request)
abi_hash = get_abi_hash(MAINNET_IMA_ABI_FILEPATH)
return construct_ok_response(data=abi_hash)
+
+
+@node_bp.route(get_api_url(BLUEPRINT_NAME, 'update-safe'), methods=['GET'])
+@g_skale
+def update_safe():
+ logger.debug(request)
+ unsafe_chains = update_unsafe_for_schains(g.skale, g.config, g.docker_utils)
+ safe = len(unsafe_chains) == 0
+ return construct_ok_response(data={'update_safe': safe, 'unsafe_chains': unsafe_chains})
diff --git a/web/routes/schains.py b/web/routes/schains.py
index 223649fb2..58963321e 100644
--- a/web/routes/schains.py
+++ b/web/routes/schains.py
@@ -31,11 +31,11 @@
get_default_rule_controller,
get_sync_agent_ranges
)
-from core.schains.skaled_status import init_skaled_status
+from core.schains.status import init_skaled_status
from core.schains.ima import get_ima_version_after_migration
from core.schains.info import get_schain_info_by_name, get_skaled_version
from core.schains.cleaner import get_schains_on_node
-from web.models.schain import get_schains_statuses, toggle_schain_repair_mode
+from web.models.schain import get_schains_statuses
from web.helper import (
construct_ok_response,
construct_err_response,
@@ -85,7 +85,6 @@ def schain_config():
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'list'), methods=['GET'])
@g_skale
def schains_list():
- logger.debug(request)
logger.debug(request)
node_id = g.config.id
if node_id is None:
@@ -132,21 +131,6 @@ def firewall_rules():
return construct_ok_response({'endpoints': endpoints})
-@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'repair'), methods=['POST'])
-def repair():
- logger.debug(request)
- schain_name = request.json.get('schain_name')
- snapshot_from = request.json.get('snapshot_from', '')
- result = toggle_schain_repair_mode(
- schain_name, snapshot_from=snapshot_from)
- if result:
- return construct_ok_response()
- else:
- return construct_err_response(
- msg=f'No schain with name {schain_name}'
- )
-
-
@schains_bp.route(get_api_url(BLUEPRINT_NAME, 'get'), methods=['GET'])
@g_skale
def get_schain():