
Commit 857ce7e

Hybrid Newton small fixes + first test commit
1 parent 1e3a949 commit 857ce7e

File tree

5 files changed: +141 -12 lines

opm/simulators/flow/FlowProblem.hpp

Lines changed: 2 additions & 2 deletions
@@ -158,7 +158,7 @@ class FlowProblem : public GetPropType<TypeTag, Properties::BaseProblem>
     using IntensiveQuantities = GetPropType<TypeTag, Properties::IntensiveQuantities>;
     using WellModel = GetPropType<TypeTag, Properties::WellModel>;
     using AquiferModel = GetPropType<TypeTag, Properties::AquiferModel>;
-
+
     using Toolbox = MathToolbox<Evaluation>;
     using DimMatrix = Dune::FieldMatrix<Scalar, dimWorld, dimWorld>;

@@ -1672,7 +1672,7 @@ class FlowProblem : public GetPropType<TypeTag, Properties::BaseProblem>
     GlobalEqVector drift_;

     WellModel wellModel_;
-    AquiferModel aquiferModel_;
+    AquiferModel aquiferModel_;

     PffGridVector<GridView, Stencil, PffDofData_, DofMapper> pffDofData_;
     TracerModel tracerModel_;

(Both hunks are whitespace-only changes: the removed and added lines differ only in whitespace.)
opm/simulators/flow/HybridNewton.hpp

Lines changed: 9 additions & 10 deletions
@@ -81,6 +81,7 @@ class BlackOilHybridNewton
         : simulator_(simulator)
         , configsLoaded_(false)
     {}
+
     void tryApplyHybridNewton()
     {
         // Check if flag activated

@@ -128,16 +129,14 @@ class BlackOilHybridNewton
     {
         // Load cell indices from file at runtime
         std::vector<int> cell_indices = loadCellIndicesFromFile(config.cell_indices_file);
-        const size_t n_cells = cell_indices.size();
+        const std::size_t n_cells = cell_indices.size();

         // Pass cells to each stage
         auto input = constructInputTensor(config, cell_indices, n_cells);
         auto output = constructOutputTensor(input, config, n_cells);
         updateInitialGuess(output, config, cell_indices, n_cells);
     }

-
-
 protected:

     void validateFluidSystem()

@@ -319,7 +318,7 @@ class BlackOilHybridNewton
      * correspond to cells and columns correspond to input features.
      */
     Opm::ML::Tensor<Evaluation>
-    constructInputTensor(const HybridNewtonConfig& config, const std::vector<int>& cell_indices, size_t n_cells)
+    constructInputTensor(const HybridNewtonConfig& config, const std::vector<int>& cell_indices, std::size_t n_cells)
     {
         const auto& features = config.input_features;

@@ -336,11 +335,11 @@ class BlackOilHybridNewton

         const auto& unitSyst = simulator_.vanguard().schedule().getUnits();
         // Calculate total input size (feature-major)
-        size_t input_size = num_per_cell_features * n_cells + num_scalar_features;
+        std::size_t input_size = num_per_cell_features * n_cells + num_scalar_features;
         Opm::ML::Tensor<Evaluation> input(input_size);

         // Track offset for each feature in flat tensor
-        size_t offset = 0;
+        std::size_t offset = 0;

         for (const auto& [name, spec] : features) {
             if (name == "TIMESTEP") {

@@ -353,7 +352,7 @@ class BlackOilHybridNewton
                 offset += 1; // advance offset by 1 for scalar
             } else {
                 // Per-cell feature: assign values for each cell
-                for (size_t cell_idx = 0; cell_idx < n_cells; ++cell_idx) {
+                for (std::size_t cell_idx = 0; cell_idx < n_cells; ++cell_idx) {
                     const auto& intQuants = simulator_.model().intensiveQuantities(cell_indices[cell_idx], 0);
                     const auto& fs = intQuants.fluidState();

@@ -415,7 +414,7 @@ class BlackOilHybridNewton
      * output tensor does not match the expected feature layout.
      */
     Opm::ML::Tensor<Evaluation>
-    constructOutputTensor(const Opm::ML::Tensor<Evaluation>& input, const HybridNewtonConfig& config, size_t n_cells)
+    constructOutputTensor(const Opm::ML::Tensor<Evaluation>& input, const HybridNewtonConfig& config, std::size_t n_cells)
     {
         const auto& features = config.output_features;
         const int n_features = features.size();

@@ -450,7 +449,7 @@ class BlackOilHybridNewton
      * \throws std::runtime_error if an expected output feature is missing
      * or if state update fails for any cell.
      */
-    void updateInitialGuess(Opm::ML::Tensor<Evaluation>& output, const HybridNewtonConfig& config, const std::vector<int>& cell_indices, size_t n_cells)
+    void updateInitialGuess(Opm::ML::Tensor<Evaluation>& output, const HybridNewtonConfig& config, const std::vector<int>& cell_indices, std::size_t n_cells)
     {
         const auto& features = config.output_features;

@@ -471,7 +470,7 @@ class BlackOilHybridNewton

         const auto& unitSyst = simulator_.vanguard().schedule().getUnits();

-        for (size_t i = 0; i < n_cells; ++i) {
+        for (std::size_t i = 0; i < n_cells; ++i) {
             const int cell_idx = cell_indices[i];
             const auto& intQuants = simulator_.model().intensiveQuantities(cell_idx, /*timeIdx*/0);
             auto fs = intQuants.fluidState();

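The comments in constructInputTensor describe a feature-major flat layout: a scalar feature such as TIMESTEP occupies one slot, while each per-cell feature occupies n_cells consecutive slots, tracked by a running offset. A minimal Python sketch of that indexing (illustration only, not the C++ implementation; the PRESSURE/SGAS feature names are made up):

# Feature-major flat layout used by the input tensor: scalar features take one
# slot, per-cell features take n_cells consecutive slots each.
def flat_offsets(features, n_cells):
    """features: list of (name, is_scalar) pairs -> {name: (start, length)}."""
    offsets = {}
    offset = 0
    for name, is_scalar in features:
        length = 1 if is_scalar else n_cells
        offsets[name] = (offset, length)
        offset += length
    return offsets

# One scalar feature plus two per-cell features over 4 cells:
print(flat_offsets([("TIMESTEP", True), ("PRESSURE", False), ("SGAS", False)], 4))
# {'TIMESTEP': (0, 1), 'PRESSURE': (1, 4), 'SGAS': (5, 4)}

The total length here, 2 * 4 + 1 = 9, matches the input_size = num_per_cell_features * n_cells + num_scalar_features expression in the diff.
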
python/test/ml/README.md

Lines changed: 17 additions & 0 deletions

New file:

Requirements (same as opm-common/python/opm/ml/ml_tools/requirements.txt)

# Python v.3.9.0 and above
python>=3.9.0,<=3.12.0

# Numpy 1.23.0 and above
numpy>=1.23.0

# TensorFlow v.2.1 and above include both CPU and GPU versions.
tensorflow>=2.1.0

# Keras v.2.12.0 and above
keras>=2.12.0
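
A minimal install sketch for the packages listed above (assuming pip is available and the interpreter itself already satisfies the 3.9-3.12 range, since pip cannot install Python):

pip install "numpy>=1.23.0" "tensorflow>=2.1.0" "keras>=2.12.0"
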
Lines changed: 2 additions & 0 deletions

New file:

from opm.io.ecl_state import EclipseState
from opm.simulators import BlackOilSimulator

Lines changed: 111 additions & 0 deletions

New file:

import os
import json


def write_config(models, model_dir, multi_mode=False, case_label=None):
    """
    Write one or multiple model configs to JSON.

    models: list of dicts, each dict must contain:
        - input_features
        - output_features
        - model_path
        - active_cells
        - apply_time
        - feature_engineering_input (optional)
        - scaling_input (optional)
        - feature_engineering_output (optional)
        - scaling_output (optional)
        - input_scaling_params (optional)
        - output_scaling_params (optional)

    multi_mode: if True, write all configs into a single JSON.
    case_label: used as the filename for the multi-mode JSON (required if multi_mode=True).
    """
    if multi_mode and not case_label:
        raise ValueError("case_label must be provided for multi_mode=True")

    os.makedirs(model_dir, exist_ok=True)
    all_configs = []

    for m in models:
        # Normalize input/output features to lists
        input_features = m["input_features"]
        output_features = m["output_features"]
        if isinstance(input_features, str):
            input_features = [input_features]
        if isinstance(output_features, str):
            output_features = [output_features]

        n_input = len(input_features)
        n_output = len(output_features)

        # Optional fields (safe defaults)
        fe_input = m.get("feature_engineering_input") or [None] * n_input
        fe_output = m.get("feature_engineering_output") or [None] * n_output
        scaling_input = m.get("scaling_input") or [None] * n_input
        scaling_output = m.get("scaling_output") or [None] * n_output
        input_scaling_params = m.get("input_scaling_params") or [None] * n_input
        output_scaling_params = m.get("output_scaling_params") or [None] * n_output

        # Build input block
        input_block = {}
        for i, fname in enumerate(input_features):
            feat_eng = fe_input[i] if i < len(fe_input) else None
            scale = scaling_input[i] if i < len(scaling_input) else None
            feature_dict = {
                "feature_engineering": feat_eng.lower() if feat_eng and feat_eng.lower() != "none" else None,
                "scaling": scale.lower() if scale and scale.lower() != "none" else None,
            }
            feature_dict = {k: v for k, v in feature_dict.items() if v is not None}
            if i < len(input_scaling_params) and input_scaling_params[i] is not None:
                feature_dict["scaling_params"] = input_scaling_params[i]
            input_block[fname] = feature_dict

        # Build output block
        output_block = {}
        for i, fname in enumerate(output_features):
            feat_eng = fe_output[i] if i < len(fe_output) else None
            scale = scaling_output[i] if i < len(scaling_output) else None
            feature_dict = {
                "feature_engineering": feat_eng.lower() if feat_eng and feat_eng.lower() != "none" else None,
                "scaling": scale.lower() if scale and scale.lower() != "none" else None,
            }
            feature_dict = {k: v for k, v in feature_dict.items() if v is not None}
            if i < len(output_scaling_params) and output_scaling_params[i] is not None:
                feature_dict["scaling_params"] = output_scaling_params[i]
            output_block[fname] = feature_dict

        # Save active cells, one index per line
        model_base = os.path.splitext(os.path.basename(m["model_path"]))[0]
        cells_file = os.path.join(model_dir, model_base + "_active_cells.txt")
        with open(cells_file, "w") as f:
            for cell in m["active_cells"]:
                f.write(f"{cell}\n")

        # Build config dict
        cfg = {
            "model_path": m["model_path"],
            "cell_indices_file": cells_file,
            "apply_times": [m["apply_time"]],
            "features": {
                "inputs": input_block,
                "outputs": output_block
            }
        }
        all_configs.append(cfg)

    # Determine JSON filename: case_label in multi-mode, otherwise the first model's base name
    if multi_mode:
        json_name = case_label + ".json"
    else:
        json_name = os.path.splitext(os.path.basename(models[0]["model_path"]))[0] + ".json"

    json_path = os.path.join(model_dir, json_name)

    # Always write a list of configs
    with open(json_path, "w") as f:
        json.dump(all_configs, f, indent=2)

    return json_path
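
For reference, a minimal usage sketch of write_config (the model path, feature name, scaling choice, and parameter values below are made-up placeholders, not values from this commit):

# Hypothetical example: one model, applied at t = 0, covering four cells.
config_path = write_config(
    models=[{
        "model_path": "models/pressure_net.keras",   # placeholder path
        "input_features": "PRESSURE",                 # a bare string is normalized to a list
        "output_features": ["PRESSURE"],
        "active_cells": [0, 1, 2, 3],
        "apply_time": 0.0,
        "scaling_input": ["min-max"],                 # placeholder scaling name
        "input_scaling_params": [{"min": 1.0e5, "max": 4.0e7}],
    }],
    model_dir="hybrid_newton_configs",
)
print(config_path)  # hybrid_newton_configs/pressure_net.json

The resulting JSON is always a list with one entry per model; each entry carries model_path, cell_indices_file (pointing at the *_active_cells.txt written alongside it in model_dir), apply_times, and a features block with inputs and outputs.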

0 commit comments