diff --git a/src/ert/config/ert_config.py b/src/ert/config/ert_config.py
index 1ce9b13d0a6..81f90e56d9c 100644
--- a/src/ert/config/ert_config.py
+++ b/src/ert/config/ert_config.py
@@ -226,6 +226,297 @@ def handle_default(fm_step: ForwardModelStep, arg: str) -> str:
 }
 
 
+def check_non_utf_chars(file_path: str) -> None:
+    try:
+        with open(file_path, encoding="utf-8") as f:
+            f.read()
+    except UnicodeDecodeError as e:
+        error_words = str(e).split(" ")
+        hex_str = error_words[error_words.index("byte") + 1]
+        try:
+            unknown_char = chr(int(hex_str, 16))
+        except ValueError as ve:
+            unknown_char = f"hex:{hex_str}"
+            raise ConfigValidationError(
+                f"Unsupported non UTF-8 character {unknown_char!r} "
+                f"found in file: {file_path!r}",
+                config_file=file_path,
+            ) from ve
+        raise ConfigValidationError(
+            f"Unsupported non UTF-8 character {unknown_char!r} "
+            f"found in file: {file_path!r}",
+            config_file=file_path,
+        ) from e
+
+
+def read_templates(config_dict) -> list[tuple[str, str]]:
+    templates = []
+    if ConfigKeys.DATA_FILE in config_dict and ConfigKeys.ECLBASE in config_dict:
+        # This replicates the behavior of the DATA_FILE implementation
+        # in C, it adds the .DATA extension and facilitates magic string
+        # replacement in the data file
+        source_file = config_dict[ConfigKeys.DATA_FILE]
+        target_file = config_dict[ConfigKeys.ECLBASE].replace("%d", "") + ".DATA"
+        check_non_utf_chars(source_file)
+        templates.append((source_file, target_file))
+
+    for template in config_dict.get(ConfigKeys.RUN_TEMPLATE, []):
+        templates.append(tuple(template))
+    return templates
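
For reference, the byte extraction in `check_non_utf_chars` leans on the exact wording of CPython's `UnicodeDecodeError` message. A standalone sketch of that parsing (plain function, no ert imports), with the assumed message format spelled out:

```python
# Standalone sketch of the byte-extraction trick used in check_non_utf_chars.
# Assumes CPython's UnicodeDecodeError message format, e.g.:
# "'utf-8' codec can't decode byte 0xff in position 4: invalid start byte"

def first_invalid_byte(data: bytes) -> str | None:
    try:
        data.decode("utf-8")
        return None
    except UnicodeDecodeError as e:
        words = str(e).split(" ")
        # The token right after "byte" is the hex literal, e.g. "0xff";
        # int("0xff", 16) accepts the 0x prefix, as the real code relies on.
        return words[words.index("byte") + 1]

assert first_invalid_byte(b"all ascii") is None
assert first_invalid_byte(b"bad \xff byte") == "0xff"
```
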
+
+
+def workflows_from_dict(
+    content_dict,
+    substitutions,
+):
+    workflow_job_info = content_dict.get(ConfigKeys.LOAD_WORKFLOW_JOB, [])
+    workflow_job_dir_info = content_dict.get(ConfigKeys.WORKFLOW_JOB_DIRECTORY, [])
+    hook_workflow_info = content_dict.get(ConfigKeys.HOOK_WORKFLOW, [])
+    workflow_info = content_dict.get(ConfigKeys.LOAD_WORKFLOW, [])
+
+    workflow_jobs = {}
+    workflows = {}
+    hooked_workflows = defaultdict(list)
+
+    errors = []
+
+    for workflow_job in workflow_job_info:
+        try:
+            # WorkflowJob.from_file only throws an error if a
+            # non-readable file is provided.
+            # Non-existing files are caught by the new parser
+            new_job = WorkflowJob.from_file(
+                config_file=workflow_job[0],
+                name=None if len(workflow_job) == 1 else workflow_job[1],
+            )
+            name = new_job.name
+            if name in workflow_jobs:
+                ConfigWarning.warn(
+                    f"Duplicate workflow jobs with name {name!r}, choosing "
+                    f"{new_job.executable or new_job.script!r} over "
+                    f"{workflow_jobs[name].executable or workflow_jobs[name].script!r}",
+                    name,
+                )
+            workflow_jobs[name] = new_job
+        except ErtScriptLoadFailure as err:
+            ConfigWarning.warn(
+                f"Loading workflow job {workflow_job[0]!r}"
+                f" failed with '{err}'. It will not be loaded.",
+                workflow_job[0],
+            )
+        except ConfigValidationError as err:
+            errors.append(ErrorInfo(message=str(err)).set_context(workflow_job[0]))
+
+    for job_path in workflow_job_dir_info:
+        for file_name in _get_files_in_directory(job_path, errors):
+            try:
+                new_job = WorkflowJob.from_file(config_file=file_name)
+                name = new_job.name
+                if name in workflow_jobs:
+                    ConfigWarning.warn(
+                        f"Duplicate workflow jobs with name {name!r}, choosing "
+                        f"{new_job.executable or new_job.script!r} over "
+                        f"{workflow_jobs[name].executable or workflow_jobs[name].script!r}",
+                        name,
+                    )
+                workflow_jobs[name] = new_job
+            except ErtScriptLoadFailure as err:
+                ConfigWarning.warn(
+                    f"Loading workflow job {file_name!r}"
+                    f" failed with '{err}'. It will not be loaded.",
+                    file_name,
+                )
+            except ConfigValidationError as err:
+                errors.append(ErrorInfo(message=str(err)).set_context(job_path))
+    if errors:
+        raise ConfigValidationError.from_collected(errors)
+
+    for work in workflow_info:
+        filename = path.basename(work[0]) if len(work) == 1 else work[1]
+        try:
+            existed = filename in workflows
+            workflow = Workflow.from_file(
+                work[0],
+                substitutions,
+                workflow_jobs,
+            )
+            for job, args in workflow:
+                if job.ert_script:
+                    try:
+                        job.ert_script.validate(args)
+                    except ConfigValidationError as err:
+                        errors.append(ErrorInfo(message=str(err)).set_context(work[0]))
+                        continue
+            workflows[filename] = workflow
+            if existed:
+                ConfigWarning.warn(f"Workflow {filename!r} was added twice", work[0])
+        except ConfigValidationError as err:
+            ConfigWarning.warn(
+                f"Encountered the following error(s) while "
+                f"reading workflow {filename!r}. It will not be loaded: "
+                + err.cli_message(),
+                work[0],
+            )
+
+    for hook_name, mode in hook_workflow_info:
+        if hook_name not in workflows:
+            errors.append(
+                ErrorInfo(
+                    message="Cannot setup hook for non-existing"
+                    f" job name {hook_name!r}",
+                ).set_context(hook_name)
+            )
+            continue
+
+        hooked_workflows[mode].append(workflows[hook_name])
+
+    if errors:
+        raise ConfigValidationError.from_collected(errors)
+    return workflow_jobs, workflows, hooked_workflows
+
+
+def installed_forward_model_steps_from_dict(
+    config_dict,
+) -> dict[str, ForwardModelStep]:
+    errors = []
+    fm_steps = {}
+    for fm_step in config_dict.get(ConfigKeys.INSTALL_JOB, []):
+        name = fm_step[0]
+        fm_step_config_file = path.abspath(fm_step[1])
+        try:
+            new_fm_step = _forward_model_step_from_config_file(
+                name=name,
+                config_file=fm_step_config_file,
+            )
+        except ConfigValidationError as e:
+            errors.append(e)
+            continue
+        if name in fm_steps:
+            ConfigWarning.warn(
+                f"Duplicate forward model step with name {name!r}, choosing "
+                f"{fm_step_config_file!r} over {fm_steps[name].executable!r}",
+                name,
+            )
+        fm_steps[name] = new_fm_step
+
+    for fm_step_path in config_dict.get(ConfigKeys.INSTALL_JOB_DIRECTORY, []):
+        for file_name in _get_files_in_directory(fm_step_path, errors):
+            if not path.isfile(file_name):
+                continue
+            try:
+                new_fm_step = _forward_model_step_from_config_file(
+                    config_file=file_name
+                )
+            except ConfigValidationError as e:
+                errors.append(e)
+                continue
+            name = new_fm_step.name
+            if name in fm_steps:
+                ConfigWarning.warn(
+                    f"Duplicate forward model step with name {name!r}, "
+                    f"choosing {file_name!r} over {fm_steps[name].executable!r}",
+                    name,
+                )
+            fm_steps[name] = new_fm_step
+
+    if errors:
+        raise ConfigValidationError.from_collected(errors)
+    return fm_steps
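
Both loaders above follow the same "warn on duplicate, last one wins" convention: the registry assignment sits outside the `if`, so the newest definition always replaces the old one. A minimal standalone illustration of that pattern (names and paths are made up):

```python
# Sketch of the "warn on duplicate, last one wins" registry pattern shared
# by the workflow-job and forward-model-step loaders above.
import warnings

def register(registry: dict[str, str], name: str, source: str) -> None:
    if name in registry:
        warnings.warn(
            f"Duplicate entry {name!r}, choosing {source!r} over {registry[name]!r}"
        )
    registry[name] = source  # outside the if: the new entry always wins

jobs: dict[str, str] = {}
register(jobs, "MAKE_DIR", "/site/jobs/MAKE_DIR")
register(jobs, "MAKE_DIR", "/user/jobs/MAKE_DIR")  # warns, then overrides
assert jobs["MAKE_DIR"] == "/user/jobs/MAKE_DIR"
```
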
+
+
+def create_list_of_forward_model_steps_to_run(
+    installed_steps: dict[str, ForwardModelStep],
+    substitutions: Substitutions,
+    config_dict: dict,
+    preinstalled_forward_model_steps: dict[str, ForwardModelStep],
+    env_pr_fm_step: dict[str, dict[str, Any]],
+) -> list[ForwardModelStep]:
+    errors = []
+    fm_steps = []
+
+    env_vars = {}
+    for key, val in config_dict.get("SETENV", []):
+        env_vars[key] = substitutions.substitute(val)
+
+    for fm_step_description in config_dict.get(ConfigKeys.FORWARD_MODEL, []):
+        if len(fm_step_description) > 1:
+            unsubstituted_step_name, args = fm_step_description
+        else:
+            unsubstituted_step_name = fm_step_description[0]
+            args = []
+        fm_step_name = substitutions.substitute(unsubstituted_step_name)
+        try:
+            fm_step = copy.deepcopy(installed_steps[fm_step_name])
+
+            # Preserve as ContextString
+            fm_step.name = fm_step_name
+        except KeyError:
+            errors.append(
+                ConfigValidationError.with_context(
+                    f"Could not find forward model step {fm_step_name!r} in list"
+                    f" of installed forward model steps: {list(installed_steps.keys())!r}",
+                    fm_step_name,
+                )
+            )
+            continue
+        fm_step.private_args = Substitutions()
+        for arg in args:
+            match arg:
+                case key, val:
+                    fm_step.private_args[key] = val
+                case val:
+                    fm_step.arglist.append(val)
+
+        should_add_step = True
+
+        if fm_step.required_keywords:
+            for req in fm_step.required_keywords:
+                if req not in fm_step.private_args:
+                    errors.append(
+                        ConfigValidationError.with_context(
+                            f"Required keyword {req} not found for forward model step {fm_step_name}",
+                            fm_step_name,
+                        )
+                    )
+                    should_add_step = False
+
+        if should_add_step:
+            fm_steps.append(fm_step)
+
+    for fm_step in fm_steps:
+        if fm_step.name in preinstalled_forward_model_steps:
+            try:
+                substituted_json = create_forward_model_json(
+                    run_id=None,
+                    context=substitutions,
+                    forward_model_steps=[fm_step],
+                    skip_pre_experiment_validation=True,
+                    env_vars=env_vars,
+                    env_pr_fm_step=env_pr_fm_step,
+                )
+                fm_json_for_validation = dict(substituted_json["jobList"][0])
+                fm_json_for_validation["environment"] = {
+                    **substituted_json["global_environment"],
+                    **fm_json_for_validation["environment"],
+                }
+                fm_step.validate_pre_experiment(fm_json_for_validation)
+            except ForwardModelStepValidationError as err:
+                errors.append(
+                    ConfigValidationError.with_context(
+                        f"Forward model step pre-experiment validation failed: {err!s}",
+                        context=fm_step.name,
+                    ),
+                )
+            except Exception as e:  # type: ignore
+                ConfigWarning.warn(
+                    f"Unexpected plugin forward model exception: {e!s}",
+                    context=fm_step.name,
+                )
+
+    if errors:
+        raise ConfigValidationError.from_collected(errors)
+
+    return fm_steps
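
The `match` statement above is what splits `KEY=value` pairs (already parsed into 2-tuples upstream) from positional arguments; strings are not treated as sequences by structural pattern matching, so they fall through to the capture case. A standalone sketch of the same dispatch, with a plain dict and list standing in for `Substitutions` and `arglist`:

```python
# Sketch of the argument dispatch in create_list_of_forward_model_steps_to_run:
# 2-tuples become private substitutions, everything else is positional.
def split_args(args: list) -> tuple[dict, list]:
    private_args: dict = {}
    arglist: list = []
    for arg in args:
        match arg:
            case key, val:  # any 2-sequence, e.g. ("<ITER>", "3")
                private_args[key] = val
            case val:  # fallback: positional argument (str is never a sequence here)
                arglist.append(val)
    return private_args, arglist

private, positional = split_args([("<ITER>", "3"), "--fast", ("<CASE>", "a")])
assert private == {"<ITER>": "3", "<CASE>": "a"}
assert positional == ["--fast"]
```
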
+
+
 @dataclass
 class ErtConfig:
     DEFAULT_ENSPATH: ClassVar[str] = "storage"
@@ -312,7 +603,7 @@ def with_plugins(
             preinstalled_fm_steps[fm_step.name] = fm_step
 
         if env_pr_fm_step is None:
-            env_pr_fm_step = _uppercase_subkeys_and_stringify_subvalues(
+            env_pr_fm_step = uppercase_subkeys_and_stringify_subvalues(
                 pm.get_forward_model_configuration()
             )
 
@@ -434,7 +725,7 @@ def from_dict(cls, config_dict) -> Self:
                 errors.append(e["ctx"]["error"])
 
         try:
-            workflow_jobs, workflows, hooked_workflows = cls._workflows_from_dict(
+            workflow_jobs, workflows, hooked_workflows = workflows_from_dict(
                 config_dict, substitutions
             )
         except ConfigValidationError as e:
@@ -446,7 +737,7 @@ def from_dict(cls, config_dict) -> Self:
             )
 
             installed_forward_model_steps.update(
-                cls._installed_forward_model_steps_from_dict(config_dict)
+                installed_forward_model_steps_from_dict(config_dict)
             )
 
         except ConfigValidationError as e:
@@ -542,7 +833,7 @@ def from_dict(cls, config_dict) -> Self:
             workflows=workflows,
             hooked_workflows=hooked_workflows,
             runpath_file=Path(runpath_file),
-            ert_templates=cls._read_templates(config_dict),
+            ert_templates=read_templates(config_dict),
             installed_forward_model_steps=installed_forward_model_steps,
             forward_model_steps=cls._create_list_of_forward_model_steps_to_run(
                 installed_forward_model_steps,
@@ -555,6 +846,21 @@ def from_dict(cls, config_dict) -> Self:
             enkf_obs=observations,
         )
 
+    @classmethod
+    def _create_list_of_forward_model_steps_to_run(
+        cls,
+        installed_steps: dict[str, ForwardModelStep],
+        substitutions: Substitutions,
+        config_dict: dict,
+    ) -> list[ForwardModelStep]:
+        return create_list_of_forward_model_steps_to_run(
+            installed_steps,
+            substitutions,
+            config_dict,
+            cls.PREINSTALLED_FORWARD_MODEL_STEPS,
+            cls.ENV_PR_FM_STEP,
+        )
+
     @classmethod
     def _read_summary_keys(cls, config_dict) -> list[str]:
         return [
@@ -684,47 +990,6 @@ def _read_user_config_and_apply_site_config(
         cls._log_custom_forward_model_steps(user_config_dict)
         return cls._merge_user_and_site_config(user_config_dict, site_config_dict)
 
-    @staticmethod
-    def check_non_utf_chars(file_path: str) -> None:
-        try:
-            with open(file_path, encoding="utf-8") as f:
-                f.read()
-        except UnicodeDecodeError as e:
-            error_words = str(e).split(" ")
-            hex_str = error_words[error_words.index("byte") + 1]
-            try:
-                unknown_char = chr(int(hex_str, 16))
-            except ValueError as ve:
-                unknown_char = f"hex:{hex_str}"
-                raise ConfigValidationError(
-                    f"Unsupported non UTF-8 character {unknown_char!r} "
-                    f"found in file: {file_path!r}",
-                    config_file=file_path,
-                ) from ve
-            raise ConfigValidationError(
-                f"Unsupported non UTF-8 character {unknown_char!r} "
-                f"found in file: {file_path!r}",
-                config_file=file_path,
-            ) from e
-
-    @classmethod
-    def _read_templates(cls, config_dict) -> list[tuple[str, str]]:
-        templates = []
-        if ConfigKeys.DATA_FILE in config_dict and ConfigKeys.ECLBASE in config_dict:
-            # This replicates the behavior of the DATA_FILE implementation
-            # in C, it adds the .DATA extension and facilitates magic string
-            # replacement in the data file
-            source_file = config_dict[ConfigKeys.DATA_FILE]
-            target_file = (
-                config_dict[ConfigKeys.ECLBASE].replace("%d", "") + ".DATA"
-            )
-            cls.check_non_utf_chars(source_file)
-            templates.append([source_file, target_file])
-
-        for template in config_dict.get(ConfigKeys.RUN_TEMPLATE, []):
-            templates.append(template)
-        return templates
-
     @classmethod
     def _validate_dict(
         cls, config_dict, config_file: str
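
The deleted classmethod above and its module-level replacement derive the template target the same way: strip the `%d` realization placeholder from ECLBASE and append `.DATA`. A sketch of just that derivation (case names are invented):

```python
# Sketch of the DATA_FILE -> <ECLBASE>.DATA target derivation that
# read_templates replicates from the old C implementation.
def data_file_target(eclbase: str) -> str:
    return eclbase.replace("%d", "") + ".DATA"

assert data_file_target("CASE-%d") == "CASE-.DATA"
assert data_file_target("EGRID_CASE") == "EGRID_CASE.DATA"
```
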
@@ -741,267 +1006,9 @@ def _validate_dict(
         )
         return errors
 
-    @classmethod
-    def _create_list_of_forward_model_steps_to_run(
-        cls,
-        installed_steps: dict[str, ForwardModelStep],
-        substitutions: Substitutions,
-        config_dict: dict,
-    ) -> list[ForwardModelStep]:
-        errors = []
-        fm_steps = []
-
-        env_vars = {}
-        for key, val in config_dict.get("SETENV", []):
-            env_vars[key] = substitutions.substitute(val)
-
-        for fm_step_description in config_dict.get(ConfigKeys.FORWARD_MODEL, []):
-            if len(fm_step_description) > 1:
-                unsubstituted_step_name, args = fm_step_description
-            else:
-                unsubstituted_step_name = fm_step_description[0]
-                args = []
-            fm_step_name = substitutions.substitute(unsubstituted_step_name)
-            try:
-                fm_step = copy.deepcopy(installed_steps[fm_step_name])
-
-                # Preserve as ContextString
-                fm_step.name = fm_step_name
-            except KeyError:
-                errors.append(
-                    ConfigValidationError.with_context(
-                        f"Could not find forward model step {fm_step_name!r} in list"
-                        f" of installed forward model steps: {list(installed_steps.keys())!r}",
-                        fm_step_name,
-                    )
-                )
-                continue
-            fm_step.private_args = Substitutions()
-            for arg in args:
-                match arg:
-                    case key, val:
-                        fm_step.private_args[key] = val
-                    case val:
-                        fm_step.arglist.append(val)
-
-            should_add_step = True
-
-            if fm_step.required_keywords:
-                for req in fm_step.required_keywords:
-                    if req not in fm_step.private_args:
-                        errors.append(
-                            ConfigValidationError.with_context(
-                                f"Required keyword {req} not found for forward model step {fm_step_name}",
-                                fm_step_name,
-                            )
-                        )
-                        should_add_step = False
-
-            if should_add_step:
-                fm_steps.append(fm_step)
-
-        for fm_step in fm_steps:
-            if fm_step.name in cls.PREINSTALLED_FORWARD_MODEL_STEPS:
-                try:
-                    substituted_json = create_forward_model_json(
-                        run_id=None,
-                        context=substitutions,
-                        forward_model_steps=[fm_step],
-                        skip_pre_experiment_validation=True,
-                        env_vars=env_vars,
-                        env_pr_fm_step=cls.ENV_PR_FM_STEP,
-                    )
-                    fm_json_for_validation = dict(substituted_json["jobList"][0])
-                    fm_json_for_validation["environment"] = {
-                        **substituted_json["global_environment"],
-                        **fm_json_for_validation["environment"],
-                    }
-                    fm_step.validate_pre_experiment(fm_json_for_validation)
-                except ForwardModelStepValidationError as err:
-                    errors.append(
-                        ConfigValidationError.with_context(
-                            f"Forward model step pre-experiment validation failed: {err!s}",
-                            context=fm_step.name,
-                        ),
-                    )
-                except Exception as e:  # type: ignore
-                    ConfigWarning.warn(
-                        f"Unexpected plugin forward model exception: {e!s}",
-                        context=fm_step.name,
-                    )
-
-        if errors:
-            raise ConfigValidationError.from_collected(errors)
-
-        return fm_steps
-
     def forward_model_step_name_list(self) -> list[str]:
         return [j.name for j in self.forward_model_steps]
 
-    @classmethod
-    def _workflows_from_dict(
-        cls,
-        content_dict,
-        substitutions,
-    ):
-        workflow_job_info = content_dict.get(ConfigKeys.LOAD_WORKFLOW_JOB, [])
-        workflow_job_dir_info = content_dict.get(ConfigKeys.WORKFLOW_JOB_DIRECTORY, [])
-        hook_workflow_info = content_dict.get(ConfigKeys.HOOK_WORKFLOW, [])
-        workflow_info = content_dict.get(ConfigKeys.LOAD_WORKFLOW, [])
-
-        workflow_jobs = {}
-        workflows = {}
-        hooked_workflows = defaultdict(list)
-
-        errors = []
-
-        for workflow_job in workflow_job_info:
-            try:
-                # WorkflowJob.fromFile only throws error if a
-                # non-readable file is provided.
-                # Non-existing files are caught by the new parser
-                new_job = WorkflowJob.from_file(
-                    config_file=workflow_job[0],
-                    name=None if len(workflow_job) == 1 else workflow_job[1],
-                )
-                name = new_job.name
-                if name in workflow_jobs:
-                    ConfigWarning.warn(
-                        f"Duplicate workflow jobs with name {name!r}, choosing "
-                        f"{new_job.executable or new_job.script!r} over "
-                        f"{workflow_jobs[name].executable or workflow_jobs[name].script!r}",
-                        name,
-                    )
-                workflow_jobs[name] = new_job
-            except ErtScriptLoadFailure as err:
-                ConfigWarning.warn(
-                    f"Loading workflow job {workflow_job[0]!r}"
-                    f" failed with '{err}'. It will not be loaded.",
-                    workflow_job[0],
-                )
-            except ConfigValidationError as err:
-                errors.append(ErrorInfo(message=str(err)).set_context(workflow_job[0]))
-
-        for job_path in workflow_job_dir_info:
-            for file_name in _get_files_in_directory(job_path, errors):
-                try:
-                    new_job = WorkflowJob.from_file(config_file=file_name)
-                    name = new_job.name
-                    if name in workflow_jobs:
-                        ConfigWarning.warn(
-                            f"Duplicate workflow jobs with name {name!r}, choosing "
-                            f"{new_job.executable or new_job.script!r} over "
-                            f"{workflow_jobs[name].executable or workflow_jobs[name].script!r}",
-                            name,
-                        )
-                    workflow_jobs[name] = new_job
-                except ErtScriptLoadFailure as err:
-                    ConfigWarning.warn(
-                        f"Loading workflow job {file_name!r}"
-                        f" failed with '{err}'. It will not be loaded.",
-                        file_name,
-                    )
-                except ConfigValidationError as err:
-                    errors.append(ErrorInfo(message=str(err)).set_context(job_path))
-        if errors:
-            raise ConfigValidationError.from_collected(errors)
-
-        for work in workflow_info:
-            filename = path.basename(work[0]) if len(work) == 1 else work[1]
-            try:
-                existed = filename in workflows
-                workflow = Workflow.from_file(
-                    work[0],
-                    substitutions,
-                    workflow_jobs,
-                )
-                for job, args in workflow:
-                    if job.ert_script:
-                        try:
-                            job.ert_script.validate(args)
-                        except ConfigValidationError as err:
-                            errors.append(
-                                ErrorInfo(message=str(err)).set_context(work[0])
-                            )
-                            continue
-                workflows[filename] = workflow
-                if existed:
-                    ConfigWarning.warn(
-                        f"Workflow {filename!r} was added twice", work[0]
-                    )
-            except ConfigValidationError as err:
-                ConfigWarning.warn(
-                    f"Encountered the following error(s) while "
-                    f"reading workflow {filename!r}. It will not be loaded: "
-                    + err.cli_message(),
-                    work[0],
-                )
-
-        for hook_name, mode in hook_workflow_info:
-            if hook_name not in workflows:
-                errors.append(
-                    ErrorInfo(
-                        message="Cannot setup hook for non-existing"
-                        f" job name {hook_name!r}",
-                    ).set_context(hook_name)
-                )
-                continue
-
-            hooked_workflows[mode].append(workflows[hook_name])
-
-        if errors:
-            raise ConfigValidationError.from_collected(errors)
-        return workflow_jobs, workflows, hooked_workflows
-
-    @classmethod
-    def _installed_forward_model_steps_from_dict(
-        cls, config_dict
-    ) -> dict[str, ForwardModelStep]:
-        errors = []
-        fm_steps = {}
-        for fm_step in config_dict.get(ConfigKeys.INSTALL_JOB, []):
-            name = fm_step[0]
-            fm_step_config_file = path.abspath(fm_step[1])
-            try:
-                new_fm_step = _forward_model_step_from_config_file(
-                    name=name,
-                    config_file=fm_step_config_file,
-                )
-            except ConfigValidationError as e:
-                errors.append(e)
-                continue
-            if name in fm_steps:
-                ConfigWarning.warn(
-                    f"Duplicate forward model step with name {name!r}, choosing "
-                    f"{fm_step_config_file!r} over {fm_steps[name].executable!r}",
-                    name,
-                )
-            fm_steps[name] = new_fm_step
-
-        for fm_step_path in config_dict.get(ConfigKeys.INSTALL_JOB_DIRECTORY, []):
-            for file_name in _get_files_in_directory(fm_step_path, errors):
-                if not path.isfile(file_name):
-                    continue
-                try:
-                    new_fm_step = _forward_model_step_from_config_file(
-                        config_file=file_name
-                    )
-                except ConfigValidationError as e:
-                    errors.append(e)
-                    continue
-                name = new_fm_step.name
-                if name in fm_steps:
-                    ConfigWarning.warn(
-                        f"Duplicate forward model step with name {name!r}, "
-                        f"choosing {file_name!r} over {fm_steps[name].executable!r}",
-                        name,
-                    )
-                fm_steps[name] = new_fm_step
-
-        if errors:
-            raise ConfigValidationError.from_collected(errors)
-        return fm_steps
-
     @property
     def env_pr_fm_step(self) -> dict[str, dict[str, Any]]:
         return self.ENV_PR_FM_STEP
@@ -1109,7 +1116,7 @@ def _substitutions_from_dict(config_dict) -> Substitutions:
     return Substitutions(subst_list)
 
 
-def _uppercase_subkeys_and_stringify_subvalues(
+def uppercase_subkeys_and_stringify_subvalues(
     nested_dict: dict[str, dict[str, Any]],
 ) -> dict[str, dict[str, str]]:
     fixed_dict: dict[str, dict[str, str]] = {}
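
The hunk above cuts off before the renamed helper's body. A standalone sketch that matches what the name and signature advertise — inner keys uppercased, inner values stringified — which is an assumption about the real implementation, not a copy of it:

```python
# Behavior sketch consistent with the name/signature of
# uppercase_subkeys_and_stringify_subvalues (assumed, not copied).
from typing import Any

def uppercase_and_stringify(
    nested: dict[str, dict[str, Any]],
) -> dict[str, dict[str, str]]:
    return {
        step: {key.upper(): str(val) for key, val in env.items()}
        for step, env in nested.items()
    }

assert uppercase_and_stringify({"eclipse": {"version": 2024.1}}) == {
    "eclipse": {"VERSION": "2024.1"}
}
```
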
diff --git a/src/ert/run_models/everest_run_model.py b/src/ert/run_models/everest_run_model.py
index 43f41f664f4..6a6a98205b5 100644
--- a/src/ert/run_models/everest_run_model.py
+++ b/src/ert/run_models/everest_run_model.py
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import collections
 import datetime
 import functools
 import json
@@ -27,14 +28,30 @@
 from typing_extensions import TypedDict
 
 from _ert.events import EESnapshot, EESnapshotUpdate, Event
-from ert.config import ErtConfig, ExtParamConfig
+from ert.config import ExtParamConfig
+from ert.config.ensemble_config import EnsembleConfig
+from ert.config.ert_config import (
+    _substitutions_from_dict,
+    create_list_of_forward_model_steps_to_run,
+    installed_forward_model_steps_from_dict,
+    read_templates,
+    uppercase_subkeys_and_stringify_subvalues,
+    workflows_from_dict,
+)
+from ert.config.forward_model_step import ForwardModelStep
+from ert.config.model_config import ModelConfig
+from ert.config.queue_config import QueueConfig
 from ert.ensemble_evaluator import EnsembleSnapshot, EvaluatorServerConfig
+from ert.plugins.plugin_manager import ErtPluginManager
 from ert.runpaths import Runpaths
 from ert.storage import open_storage
 from everest.config import ControlConfig, ControlVariableGuessListConfig, EverestConfig
+from everest.config.control_variable_config import ControlVariableConfig
 from everest.optimizer.everest2ropt import everest2ropt
-from everest.simulator.everest_to_ert import everest_to_ert_config
-from everest.strings import EVEREST
+from everest.simulator.everest_to_ert import (
+    everest_to_ert_config_dict,
+)
+from everest.strings import EVEREST, STORAGE_DIR
 
 from ..run_arg import RunArg, create_run_arguments
 from .base_run_model import BaseRunModel, StatusEvents
@@ -99,11 +116,13 @@ class EverestExitCode(IntEnum):
 class EverestRunModel(BaseRunModel):
     def __init__(
         self,
-        config: ErtConfig,
         everest_config: EverestConfig,
         simulation_callback: SimulationCallback | None,
         optimization_callback: OptimizerCallback | None,
     ):
+        assert everest_config.log_dir is not None
+        assert everest_config.optimization_output_dir is not None
+
         Path(everest_config.log_dir).mkdir(parents=True, exist_ok=True)
         Path(everest_config.optimization_output_dir).mkdir(parents=True, exist_ok=True)
 
@@ -135,39 +154,120 @@ def __init__(
         self._batch_id: int = 0
         self._status: SimulationStatus | None = None
 
-        storage = open_storage(config.ens_path, mode="w")
+        ens_path = os.path.join(everest_config.output_dir, STORAGE_DIR)
+        storage = open_storage(ens_path, mode="w")
         status_queue: queue.SimpleQueue[StatusEvents] = queue.SimpleQueue()
 
+        config_dict = everest_to_ert_config_dict(everest_config)
+
+        runpath_file: Path = Path(
+            os.path.join(everest_config.output_dir, ".res_runpath_list")
+        )
+
+        assert everest_config.config_file is not None
+        config_file: Path = Path(everest_config.config_file)
+
+        model_config = ModelConfig.from_dict(config_dict)
+        queue_config = QueueConfig.from_dict(config_dict)
+
+        ensemble_config = EnsembleConfig.from_dict(config_dict)
+
+        def _get_variables(
+            variables: list[ControlVariableConfig]
+            | list[ControlVariableGuessListConfig],
+        ) -> list[str] | dict[str, list[str]]:
+            if (
+                isinstance(variables[0], ControlVariableConfig)
+                and getattr(variables[0], "index", None) is None
+            ):
+                return [var.name for var in variables]
+            result: collections.defaultdict[str, list] = collections.defaultdict(list)
+            for variable in variables:
+                if isinstance(variable, ControlVariableGuessListConfig):
+                    result[variable.name].extend(
+                        str(index + 1) for index, _ in enumerate(variable.initial_guess)
+                    )
+                else:
+                    result[variable.name].append(str(variable.index))  # type: ignore
+            return dict(result)
+
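A standalone sketch of `_get_variables` using stripped-down stand-ins for the Everest control-variable models (the real ones are pydantic models; only the attributes the closure reads are modelled here):

```python
# Stand-ins for ControlVariableConfig / ControlVariableGuessListConfig,
# reduced to the attributes _get_variables actually touches.
from collections import defaultdict
from dataclasses import dataclass

@dataclass
class Var:  # plays ControlVariableConfig
    name: str
    index: int | None = None

@dataclass
class GuessListVar:  # plays ControlVariableGuessListConfig
    name: str
    initial_guess: list = None

def get_variables(variables):
    if isinstance(variables[0], Var) and variables[0].index is None:
        return [v.name for v in variables]
    result = defaultdict(list)
    for v in variables:
        if isinstance(v, GuessListVar):
            result[v.name].extend(str(i + 1) for i in range(len(v.initial_guess)))
        else:
            result[v.name].append(str(v.index))
    return dict(result)

# Scalar controls -> flat key list
assert get_variables([Var("x"), Var("y")]) == ["x", "y"]
# Indexed controls -> name mapped to its index strings
assert get_variables([Var("x", 0), Var("x", 1)]) == {"x": ["0", "1"]}
# Guess lists -> 1-based indices derived from the guess length
assert get_variables([GuessListVar("w", [0.1, 0.2])]) == {"w": ["1", "2"]}
```
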
+        # This adds an EXT_PARAM key to the ert_config, which is not a true ERT
+        # configuration key. When initializing an ERT config object, it is ignored.
+        # It is used by the Simulator object to inject ExtParamConfig nodes.
+        for control in everest_config.controls or []:
+            ensemble_config.parameter_configs[control.name] = ExtParamConfig(
+                name=control.name,
+                input_keys=_get_variables(control.variables),
+                output_file=control.name + ".json",
+            )
+
+        substitutions = _substitutions_from_dict(config_dict)
+        substitutions["<RUNPATH_FILE>"] = str(runpath_file)
+        substitutions["<RUNPATH>"] = model_config.runpath_format_string
+        substitutions["<ECL_BASE>"] = model_config.eclbase_format_string
+        substitutions["<ECLBASE>"] = model_config.eclbase_format_string
+        substitutions["<NUM_CPU>"] = str(queue_config.preferred_num_cpu)
+
+        ert_templates = read_templates(config_dict)
+        _, _, hooked_workflows = workflows_from_dict(config_dict, substitutions)
+
+        preinstalled_forward_model_steps: dict[str, ForwardModelStep] = {}
+        pm = ErtPluginManager()
+        for fm_step_subclass in pm.forward_model_steps:
+            fm_step = fm_step_subclass()
+            preinstalled_forward_model_steps[fm_step.name] = fm_step
+
+        installed_forward_model_steps = dict(preinstalled_forward_model_steps)
+        installed_forward_model_steps.update(
+            installed_forward_model_steps_from_dict(config_dict)
+        )
+
+        env_pr_fm_step = uppercase_subkeys_and_stringify_subvalues(
+            pm.get_forward_model_configuration()
+        )
+
+        forward_model_steps = create_list_of_forward_model_steps_to_run(
+            installed_forward_model_steps,
+            substitutions,
+            config_dict,
+            preinstalled_forward_model_steps,
+            env_pr_fm_step,
+        )
+
+        env_vars = {}
+        for key, val in config_dict.get("SETENV", []):
+            env_vars[key] = val
+
+        self.support_restart = False
+        self._parameter_configuration = ensemble_config.parameter_configuration
+        self._parameter_configs = ensemble_config.parameter_configs
+        self._response_configuration = ensemble_config.response_configuration
+
         super().__init__(
             storage,
-            config.runpath_file,
-            Path(config.user_config_file),
-            config.env_vars,
-            config.env_pr_fm_step,
-            config.model_config,
-            config.queue_config,
-            config.forward_model_steps,
+            runpath_file,
+            config_file,
+            env_vars,
+            env_pr_fm_step,
+            model_config,
+            queue_config,
+            forward_model_steps,
             status_queue,
-            config.substitutions,
-            config.ert_templates,
-            config.hooked_workflows,
+            substitutions,
+            ert_templates,
+            hooked_workflows,
             active_realizations=[],  # Set dynamically in run_forward_model()
         )
-        self.support_restart = False
-        self._parameter_configuration = config.ensemble_config.parameter_configuration
-        self._parameter_configs = config.ensemble_config.parameter_configs
-        self._response_configuration = config.ensemble_config.response_configuration
 
     @classmethod
     def create(
         cls,
-        ever_config: EverestConfig,
+        everest_config: EverestConfig,
         simulation_callback: SimulationCallback | None = None,
         optimization_callback: OptimizerCallback | None = None,
     ) -> EverestRunModel:
         return cls(
-            config=everest_to_ert_config(ever_config),
-            everest_config=ever_config,
+            everest_config=everest_config,
             simulation_callback=simulation_callback,
             optimization_callback=optimization_callback,
         )
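
The `<RUNPATH_FILE>`, `<ECLBASE>`, etc. registered above follow ERT's angle-bracket magic-string convention. A minimal dict-based sketch of how such keys resolve (ert's `Substitutions` is richer; this shows only the core replacement idea):

```python
# Minimal sketch of ERT-style <KEY> magic-string replacement, standing in
# for ert's Substitutions mapping; replacement is plain text substitution.
def substitute(mapping: dict[str, str], text: str) -> str:
    for key, val in mapping.items():
        text = text.replace(key, val)
    return text

subs = {
    "<RUNPATH>": "/scratch/case/realization-<IENS>",
    "<ECLBASE>": "EGRID_CASE",
}
assert substitute(subs, "<ECLBASE>.DATA") == "EGRID_CASE.DATA"
```
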
@@ -291,6 +391,7 @@ def _create_optimizer(self) -> BasicOptimizer:
         # simplifying code that reads them as fixed width tables. `maximize` is
         # set because ropt reports minimization results, while everest wants
         # maximization results, necessitating a conversion step.
+        assert self._everest_config.optimization_output_dir is not None
         ropt_output_folder = Path(self._everest_config.optimization_output_dir)
         optimizer = (
             BasicOptimizer(
diff --git a/src/everest/simulator/everest_to_ert.py b/src/everest/simulator/everest_to_ert.py
index e4fe35108e0..4dece53ce50 100644
--- a/src/everest/simulator/everest_to_ert.py
+++ b/src/everest/simulator/everest_to_ert.py
@@ -489,6 +489,14 @@ def _everest_to_ert_config_dict(
     return ert_config
 
 
+def everest_to_ert_config_dict(everest_config: EverestConfig) -> ConfigDict:
+    with ErtPluginContext():
+        config_dict = _everest_to_ert_config_dict(
+            everest_config, site_config=ErtConfig.read_site_config()
+        )
+    return config_dict
+
+
 def everest_to_ert_config(ever_config: EverestConfig) -> ErtConfig:
     with ErtPluginContext():
         config_dict = _everest_to_ert_config_dict(
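
`everest_to_ert_config_dict` wraps the translation in `ErtPluginContext` so site config and plugin registrations are only active for the duration of the call. The same scoping idea in miniature (all names below are invented for illustration):

```python
# Miniature of the scoped-plugin pattern used by everest_to_ert_config_dict:
# a context manager activates a registry only while the translation runs.
from contextlib import contextmanager

ACTIVE_PLUGINS: list[str] = []

@contextmanager
def plugin_context(plugins: list[str]):
    ACTIVE_PLUGINS.extend(plugins)  # register on entry
    try:
        yield
    finally:
        ACTIVE_PLUGINS.clear()  # deregister on exit

def translate_config(raw: dict) -> dict:
    return {**raw, "plugins": list(ACTIVE_PLUGINS)}

with plugin_context(["site_config"]):
    config = translate_config({"queue": "local"})
assert config == {"queue": "local", "plugins": ["site_config"]}
assert ACTIVE_PLUGINS == []
```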