Commit e758f21

Merge pull request #386 from UCL-CCS/grid_search
Grid search
2 parents 6dd5ec6 + 4e194e0 commit e758f21

54 files changed: +2,261 −269 lines. (Large commit: only a subset of the changed files is shown below.)

easyvvuq/actions/execute_local.py (+1 −1)

@@ -268,7 +268,7 @@ def start(self, previous=None):
         for action in self.actions:
             previous = self.wrapper(action, previous)
         self.result = previous
-        assert(self.result['run_id'] == run_id)
+        assert (self.result['run_id'] == run_id)
         return previous
 
     def finished(self):

easyvvuq/analysis/pce_analysis.py (+4 −4)

@@ -350,8 +350,8 @@ def sobols(P, coefficients):
             varied = [_ for _ in self.sampler.vary.get_keys()]
             S1 = {_: np.zeros(sobol.shape[-1]) for _ in varied}
             ST = {_: np.zeros(sobol.shape[-1]) for _ in varied}
-            #S2 = {_ : {__: np.zeros(sobol.shape[-1]) for __ in varied} for _ in varied}
-            #for v in varied: del S2[v][v]
+            # S2 = {_ : {__: np.zeros(sobol.shape[-1]) for __ in varied} for _ in varied}
+            # for v in varied: del S2[v][v]
             S2 = {_: np.zeros((len(varied), sobol.shape[-1])) for _ in varied}
             for n, si in enumerate(sobol_idx):
                 if len(si) == 1:
@@ -360,8 +360,8 @@ def sobols(P, coefficients):
                 elif len(si) == 2:
                     v1 = varied[si[0]]
                     v2 = varied[si[1]]
-                    #S2[v1][v2] = sobol[n]
-                    #S2[v2][v1] = sobol[n]
+                    # S2[v1][v2] = sobol[n]
+                    # S2[v2][v1] = sobol[n]
                     S2[v1][si[1]] = sobol[n]
                     S2[v2][si[0]] = sobol[n]
                 for i in si:

easyvvuq/analysis/results.py (+3 −3)

@@ -167,7 +167,7 @@ def _get_sobols_general(self, getter, qoi=None, input_=None):
         -------
         dict or array
         """
-        assert(not ((qoi is None) and (input_ is not None)))
+        assert (not ((qoi is None) and (input_ is not None)))
         if (qoi is not None) and (qoi not in self.qois):
             raise RuntimeError('no such qoi in this analysis')
         if (input_ is not None) and (input_ not in self.inputs):
@@ -349,7 +349,7 @@ def describe(self, qoi=None, statistic=None):
         an array with the values for that statistic. Otherwise will return a DataFrame
         with more data.
         """
-        assert(not ((qoi is None) and (statistic is not None)))
+        assert (not ((qoi is None) and (statistic is not None)))
         statistics = ['mean', 'var', 'std', '1%', '10%', '90%', '99%', 'min', 'max', 'median']
         qois = self.qois
         if qoi is not None:
@@ -361,7 +361,7 @@ def describe(self, qoi=None, statistic=None):
             for statistic_ in statistics:
                 try:
                     value = self._describe(qoi, statistic_)
-                    assert(isinstance(value, np.ndarray))
+                    assert (isinstance(value, np.ndarray))
                     for i, x in enumerate(value):
                         try:
                             result[(qoi, i)][statistic_] = x
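
For orientation, a minimal sketch (not part of the diff) of how the describe() method touched above is typically called, based only on its signature and docstring shown in this hunk: with no arguments it returns a DataFrame of statistics per QoI, while a (qoi, statistic) pair returns a NumPy array. The results object and the QoI name 'f' are hypothetical.

    # Illustrative only: 'results' is an analysis results object, 'f' a hypothetical QoI name.
    stats = results.describe()               # DataFrame with all statistics for every QoI
    mean_f = results.describe('f', 'mean')   # np.ndarray holding the mean of QoI 'f'
    # Passing a statistic without a qoi would trip the assert shown above.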

easyvvuq/analysis/sc_analysis.py (+4 −4)

@@ -385,7 +385,7 @@ def adapt_dimension(self, qoi, data_frame, store_stats_history=True,
                 c_l = self.compute_comb_coef(l_norm=candidate_l_norm)
                 _, var_candidate_l, _ = self.get_pce_stats(
                     candidate_l_norm, self.pce_coefs[qoi], c_l)
-                #error in var
+                # error in var
                 error[tuple(l)] = np.linalg.norm(var_candidate_l - var_l, np.inf)
             else:
                 logging.debug('Specified refinement method %s not recognized' % method)
@@ -467,7 +467,7 @@ def merge_accepted_and_admissible(self, level=0, **kwargs):
         admissible_idx = np.array(admissible_idx).reshape([count, self.N])
         merged_l = np.concatenate((self.l_norm, admissible_idx))
         # make sure final result contains only unique indices and store
-        #results in l_norm
+        # results in l_norm
         idx = np.unique(merged_l, axis=0, return_index=True)[1]
         # return np.array([merged_l[i] for i in sorted(idx)])
         self.l_norm = np.array([merged_l[i] for i in sorted(idx)])
@@ -894,7 +894,7 @@ def SC2PCE(self, samples, qoi, verbose=True, **kwargs):
         for k in k_norm:
             # product of the PCE basis function or order k - 1 and all
             # Lagrange basis functions in a_1d, per dimension
-            #[[phi_k[0]*a_1d[0]], ..., [phi_k[N-1]*a_1d[N-1]]]
+            # [[phi_k[0]*a_1d[0]], ..., [phi_k[N-1]*a_1d[N-1]]]
 
             # orthogonal polynomial generated by chaospy
             phi_k = [cp.expansion.stieltjes(k[n] - 1,
@@ -1265,7 +1265,7 @@ def get_sobol_indices(self, qoi, typ='first_order'):
             for i_u in range(wi_d_u.shape[0]):
                 D_u[u] += np.sign(np.prod(diff)) * h[i_u]**2 * wi_d_u[i_u].prod()
 
-            #D_u[u] = D_u[u].flatten()
+            # D_u[u] = D_u[u].flatten()
 
             # all subsets of u
             W = list(powerset(u))[0:-1]

easyvvuq/db/sql.py (+2 −2)

@@ -244,7 +244,7 @@ def set_active_app(self, name):
         selected = self.session.query(AppTable).filter_by(name=name).all()
         if len(selected) == 0:
             raise RuntimeError('no such app - {}'.format(name))
-        assert(not (len(selected) > 1))
+        assert (not (len(selected) > 1))
         app = selected[0]
         self.session.query(CampaignTable).update({'active_app': app.id})
         self.session.commit()
@@ -519,7 +519,7 @@ def _get_campaign_info(self, campaign_name=None):
         -------
         SQLAlchemy query for campaign with this name.
         """
-        assert(isinstance(campaign_name, str) or campaign_name is None)
+        assert (isinstance(campaign_name, str) or campaign_name is None)
         query = self.session.query(CampaignTable)
         if campaign_name is None:
             campaign_info = query

easyvvuq/encoders/jinja_encoder.py (+1 −1)

@@ -1,5 +1,5 @@
 import os
-#from string import Template
+# from string import Template
 from jinja2 import Template
 import logging

easyvvuq/sampling/__init__.py (+1)

@@ -22,6 +22,7 @@
 from .mc_sampler import MCSampler
 from .csv_sampler import CSVSampler
 from .dataframe_sampler import DataFrameSampler
+from .grid_sampler import Grid_Sampler
 
 __copyright__ = """

easyvvuq/sampling/grid_sampler.py (new file, +143)

@@ -0,0 +1,143 @@
"""A grid sampler

Useful for e.g. hyperparameter search. The "vary" dict contains the values
that must be considered per (hyper)parameter, for instance:

    vary = {"x1": [0.0, 0.5, 0.1],
            "x2 = [1, 3],
            "x3" = [True, False]}

The sampler will create a tensor grid using all specified 1D parameter
values.
"""

__author__ = "Wouter Edeling"
__copyright__ = """

    Copyright 2018 Robin A. Richardson, David W. Wright

    This file is part of EasyVVUQ

    EasyVVUQ is free software: you can redistribute it and/or modify
    it under the terms of the Lesser GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    EasyVVUQ is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    Lesser GNU General Public License for more details.

    You should have received a copy of the Lesser GNU General Public License
    along with this program. If not, see <https://www.gnu.org/licenses/>.

"""
__license__ = "LGPL"

from itertools import product
import numpy as np
from .base import BaseSamplingElement  # , Vary


class Grid_Sampler(BaseSamplingElement, sampler_name="grid_sampler"):

    def __init__(self, vary, count=0):
        """
        Initialize the grid sampler.

        Parameters
        ----------
        vary : dict, or list of dicts
            A dictionary containing all 1D values for each parameter. For instance
            vary = {"x1": [0.0, 0.5. 1.0], "x2": [True, False]}. This will
            create a 2D tensor product of all (x1, x2) parameter combinations.
            If a list of vary dicts is specified, each vary dict will be treated
            independently to generate points. These dicts do not have to contain
            the same parameters. The tensor product points are stored in the
            'points' list, with one tensor product per vary dict.
        count : int, optional
            Internal counter used to count the number of samples that have
            been executed. The default is 0.

        Returns
        -------
        None.

        """
        # allways add vary to list, even if only a single dict is specified
        if not isinstance(vary, list):
            vary = [vary]

        self.vary = vary
        self.count = count
        self.points = []

        # make sure all parameters are stored in a list or array, even
        # if they have only a single value
        for _vary in vary:
            for param in _vary.keys():
                if not isinstance(_vary[param], list) and not isinstance(_vary[param], np.ndarray):
                    vary[param] = [vary[param]]

            # use dtype=object to allow for multiple different type (float, boolean etc)
            self.points.append(np.array(list(product(*list(_vary.values()))), dtype=object))

        # the cumulative sizes of all ensembles generated by the vary dicts
        self.cumul_sizes = np.cumsum([points.shape[0] for points in self.points])
        # add a zero to the beginning (necessary in __next__ subroutine)
        self.cumul_sizes = np.insert(self.cumul_sizes, 0, 0)

    def is_finite(self):
        return True

    def n_samples(self):
        """Returns the number of samples in this sampler.
        """
        # return self.points.shape[0]
        return self.cumul_sizes[-1]

    def get_param_names(self):
        """
        Get the names of all parameters that were varied.

        Returns
        -------
        param_names : list
            List of parameter names.

        """
        param_names = []
        for _vary in self.vary:
            for name in _vary.keys():
                if not name in param_names:
                    param_names.append(name)
        return param_names

    def __next__(self):
        """
        Return the next sample from the input distributions.

        Raises
        ------
        StopIteration
            Stop iteration when count >= n_samples.

        Returns
        -------
        run_dict : dict
            A dictionary with the random input samples, e.g.
            {'x1': 0.5, 'x2': False}.

        """
        if self.count < self.n_samples():
            vary_idx = np.where(self.count < self.cumul_sizes[1:])[0][0]
            run_dict = {}
            i_par = 0
            for param_name in self.vary[vary_idx].keys():
                sample_idx = self.count - self.cumul_sizes[vary_idx]
                run_dict[param_name] = self.points[vary_idx][sample_idx][i_par]
                i_par += 1
            self.count += 1
            return run_dict
        else:
            raise StopIteration
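
To make the new sampler's behaviour concrete, here is a minimal usage sketch (not part of the commit), based only on the class above and the import added to easyvvuq/sampling/__init__.py. Parameter names and values are illustrative. The single-dict form builds one tensor grid; the list-of-dicts form concatenates one tensor grid per dict, with cumul_sizes marking where each block starts.

    # Illustrative sketch, not part of the diff. Uses only what the commit adds.
    from easyvvuq.sampling import Grid_Sampler

    # Single vary dict: a 3 x 2 tensor grid over x1 and x2.
    vary = {"x1": [0.0, 0.5, 1.0], "x2": [True, False]}
    sampler = Grid_Sampler(vary)
    print(sampler.n_samples())        # 6
    print(sampler.get_param_names())  # ['x1', 'x2']

    # __next__ walks the grid point by point and raises StopIteration at the end.
    for _ in range(sampler.n_samples()):
        print(next(sampler))          # first sample: {'x1': 0.0, 'x2': True}

    # List of vary dicts: each dict gets its own tensor product, sampled back to back.
    vary_list = [{"x1": [0.0, 1.0], "x2": [True, False]},
                 {"x3": [10, 20, 30]}]
    sampler2 = Grid_Sampler(vary_list)
    print(sampler2.n_samples())       # 4 + 3 = 7 samples in total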

easyvvuq/sampling/simplex_stochastic_collocation.py (+11 −11)

@@ -111,7 +111,7 @@ def init_grid(self):
             CONSEQUENCE: I NEED TO RE-MAKE A NEW 'Delaunay' OBJECT EVERYTIME THE GRID
             IS REFINED.
             """
-            #tri = Delaunay(xi_k_jl, incremental=True)
+            # tri = Delaunay(xi_k_jl, incremental=True)
             tri = Delaunay(xi_k_jl)
 
         else:
@@ -590,15 +590,15 @@ def check_LEC_j(self, p_j, v, S_j, n_mc, queue):
         Psi = self.compute_Psi(xi_Sj, p_j)
 
         # check if Psi is well poised
-        #det_Psi = np.linalg.det(Psi)
+        # det_Psi = np.linalg.det(Psi)
         # if det_Psi == 0:
         # #print 'Warning: determinant Psi is zero.'
         # #print 'Reducing local p_j from ' + str(p_j[j]) + ' to a lower value.'
         # #return an error code
         # return queue.put({'p_j[j]':-99, 'el_idx_j':el_idx_j})
 
         # compute the coefficients c_jl
-        #c_jl = np.linalg.solve(Psi, v_Sj)
+        # c_jl = np.linalg.solve(Psi, v_Sj)
         c_jl = DAFSILAS(Psi, v_Sj)
 
         # check the LEC condition for all simplices in the STENCIL S_j
@@ -644,15 +644,15 @@ def check_LEC_j(self, p_j, v, S_j, n_mc, queue):
         Psi = self.compute_Psi(xi_Sj, p_j)
 
         # check if Psi is well poised
-        #det_Psi = np.linalg.det(Psi)
+        # det_Psi = np.linalg.det(Psi)
         # if det_Psi == 0:
         # #print 'Warning: determinant Psi is zero.'
         # #print 'Reducing local p_j from ' + str(p_j[j]) + ' to a lower value.'
         # #return an error code
         # return queue.put({'p_j[j]':-99, 'el_idx_j':el_idx_j})
 
         # compute the coefficients c_jl
-        #c_jl = np.linalg.solve(Psi, v_Sj)
+        # c_jl = np.linalg.solve(Psi, v_Sj)
         c_jl = DAFSILAS(Psi, v_Sj, False)
 
         if k == el_idx_j.size:
@@ -684,7 +684,7 @@ def compute_stencil_j(self):
 
         for j in range(n_e):
             # the number of points in S_j
-            #Np1_j = factorial(n_xi + p_j[j])/(factorial(n_xi)*factorial(p_j[j]))
+            # Np1_j = factorial(n_xi + p_j[j])/(factorial(n_xi)*factorial(p_j[j]))
             # k = {1,...,n_s}\{k_j0, ..., k_jn_xi}
             idx = np.delete(range(n_s), self.tri.simplices[j])
             # store the vertex indices of the element itself
@@ -1055,7 +1055,7 @@ def surrogate(self, xi, S_j, p_j, v):
         # print 'Error, det(Psi)=0 in compute_surplus_k() method, should not be possible'
 
         # compute the coefficients c_jl
-        #c_jl = np.linalg.solve(Psi, v_Sj)
+        # c_jl = np.linalg.solve(Psi, v_Sj)
         c_jl = DAFSILAS(Psi, v_Sj, False)
 
         # compute the interpolation on the old grid
@@ -1240,7 +1240,7 @@ def DAFSILAS(A, b, print_message=False):
     P = np.eye(n)
 
     # the ill-condition control parameter
-    #epsilon = np.finfo(np.float64).eps
+    # epsilon = np.finfo(np.float64).eps
     epsilon = 10**-14
 
     for i in range(n - 1):
@@ -1266,9 +1266,9 @@
         Ap[:, i + col] = tmp
 
         # Also interchange the entries in b
-        #tmp = A[i, n]
+        # tmp = A[i, n]
         # A[i, n] = A[i+col, n]Ap[i+1+j, i:m]
-        #A[i+col, n] = tmp
+        # A[i+col, n] = tmp
 
         # keep track of column switches via a series of permuation matrices P =
         # P1*P2*...*Pi*...*Pn ==> at each iteration x = P*xi
@@ -1305,7 +1305,7 @@
 
         # ajj = 1, aij = 0 for j = i...n
         Ap[idx[0]:n, idx[0]:n] = np.eye(nullity)
-        #bj = 0
+        # bj = 0
         Ap[idx[0]:n, n] = 0
         # ejj = 1, eij = 0
         Ap[idx[0]:n, idx[0] + n + 1:m] = np.eye(nullity)

easyvvuq/sampling/stochastic_collocation.py (+1 −1)

@@ -129,7 +129,7 @@ def __init__(self,
         else:
             self.l_norm = self.compute_sparse_multi_idx(self.L, self.N)
         # create sparse grid of dimension N and level q using the 1d
-        #rules in self.xi_1d
+        # rules in self.xi_1d
         self.xi_d = self.generate_grid(self.l_norm)
 
         self._n_samples = self.xi_d.shape[0]

tests/gauss/gauss_json.py (+4 −4)

@@ -60,12 +60,12 @@
 numbers += bias
 numbers_out = np.array(list(enumerate(numbers)))
 
-#header = 'Step,Value'
+# header = 'Step,Value'
 
-#fmt = '%i,%f'
-#np.savetxt(output_filename, numbers_out, fmt=fmt, header=header)
+# fmt = '%i,%f'
+# np.savetxt(output_filename, numbers_out, fmt=fmt, header=header)
 
-#json_output = {'numbers': list(numbers)}
+# json_output = {'numbers': list(numbers)}
 # with open(output_filename + '.json', 'wt') as json_fp:
 # json.dump(json_output, json_fp)
