@@ -246,7 +246,7 @@ def analyse(self, data_frame=None, compute_moments=True, compute_Sobols=True):
246
246
std_k = np .sqrt (var_k )
247
247
else :
248
248
pce_coefs = self .SC2PCE (self .samples [qoi_k ])
249
- mean_k , var_k = self .get_pce_stats (self .l_norm , pce_coefs , self .comb_coef )
249
+ mean_k , var_k , _ = self .get_pce_stats (self .l_norm , pce_coefs , self .comb_coef )
250
250
std_k = np .sqrt (var_k )
251
251
252
252
# compute statistical moments
@@ -325,7 +325,7 @@ def adapt_dimension(self, qoi, data_frame, store_stats_history=True,
325
325
name of the refinement error, default is 'surplus'. In this case the
326
326
error is based on the hierarchical surplus, which is an interpolation
327
327
based error. Another possibility is 'var',
328
- in which case the error is based on the difference in the
328
+ in which case the error is based on the difference in the
329
329
variance between the current estimate and the estimate obtained
330
330
when a particular candidate direction is added.
331
331
"""
@@ -343,7 +343,7 @@ def adapt_dimension(self, qoi, data_frame, store_stats_history=True,
343
343
self .wi_1d = self .sampler .wi_1d
344
344
self .pce_coefs = self .SC2PCE (samples , verbose = True , l_norm = all_idx ,
345
345
xi_d = self .sampler .xi_d )
346
- _ , var_l = self .get_pce_stats (self .l_norm , self .pce_coefs , self .comb_coef )
346
+ _ , var_l , _ = self .get_pce_stats (self .l_norm , self .pce_coefs , self .comb_coef )
347
347
348
348
# the currently accepted grid points
349
349
xi_d_accepted = self .sampler .generate_grid (self .l_norm )
@@ -378,7 +378,7 @@ def adapt_dimension(self, qoi, data_frame, store_stats_history=True,
378
378
candidate_l_norm = np .concatenate ((self .l_norm , l .reshape ([1 , self .N ])))
379
379
# now we must recompute the combination coefficients
380
380
c_l = self .compute_comb_coef (l_norm = candidate_l_norm )
381
- _ , var_candidate_l = self .get_pce_stats (candidate_l_norm , self .pce_coefs , c_l )
381
+ _ , var_candidate_l , _ = self .get_pce_stats (candidate_l_norm , self .pce_coefs , c_l )
382
382
#error in var
383
383
error [tuple (l )] = np .linalg .norm (var_candidate_l - var_l , np .inf )
384
384
else :
@@ -413,7 +413,7 @@ def adapt_dimension(self, qoi, data_frame, store_stats_history=True,
413
413
# mean_f, var_f = self.get_moments(qoi)
414
414
logging .debug ('Storing moments of iteration %d' % self .sampler .nadaptations )
415
415
pce_coefs = self .SC2PCE (samples , verbose = True )
416
- mean_f , var_f = self .get_pce_stats (self .l_norm , pce_coefs , self .comb_coef )
416
+ mean_f , var_f , _ = self .get_pce_stats (self .l_norm , pce_coefs , self .comb_coef )
417
417
self .mean_history .append (mean_f )
418
418
self .std_history .append (var_f )
419
419
logging .debug ('done' )
@@ -889,8 +889,8 @@ def SC2PCE(self, samples, verbose=True, **kwargs):
889
889
890
890
# orthogonal polynomial generated by chaospy
891
891
phi_k = [cp .expansion .stieltjes (k [n ] - 1 ,
892
- dist = self .sampler .params_distribution [n ],
893
- normed = True )[- 1 ] for n in range (self .N )]
892
+ dist = self .sampler .params_distribution [n ],
893
+ normed = True )[- 1 ] for n in range (self .N )]
894
894
895
895
# the polynomial order of each integrand phi_k*a_j = (k - 1) + (number of
896
896
# colloc. points - 1)
@@ -950,8 +950,49 @@ def SC2PCE(self, samples, verbose=True, **kwargs):
950
950
logging .debug ('done' )
951
951
return pce_coefs
952
952
953
def generalized_pce_coefs(self, l_norm, pce_coefs, comb_coef):
    """
    Compute the generalized PCE coefficients, defined as the linear combination
    of PCE coefficients which make it possible to write the dimension-adaptive
    PCE expansion in standard form. See DOI: 10.13140/RG.2.2.18085.58083/1

    Parameters
    ----------
    l_norm : array
        array of quadrature order multi indices
    pce_coefs : dict
        PCE coefficients computed by the SC2PCE subroutine, indexed per
        multi index (as a tuple)
    comb_coef : dict
        combination coefficients computed by compute_comb_coef, indexed per
        multi index (as a tuple)

    Returns
    -------
    gen_pce_coefs : dict
        The generalized PCE coefficients, indexed per multi index.

    """
    assert self.sparse, "Generalized PCE coefficients are computed only for sparse grids"

    # the generalized PCE coefs, which turn the adaptive PCE into a standard PCE expansion
    gen_pce_coefs = {}
    for l in l_norm:
        # the set of all forward neighbours of l: {k in l_norm | k >= l}
        F_l = l_norm[np.where((l <= l_norm).all(axis=1))[0]]

        # the generalized PCE coef for l is comb_coef[k] * pce_coefs[k][l],
        # summed over all forward neighbours k of l
        gen_pce_coefs[tuple(l)] = 0.0
        for k in F_l:
            gen_pce_coefs[tuple(l)] += comb_coef[tuple(k)] * pce_coefs[tuple(k)][tuple(l)]

    return gen_pce_coefs
992
+
953
993
def get_pce_stats (self , l_norm , pce_coefs , comb_coef ):
954
- """Compute the mean and the variance based on the PCE coefficients
994
+ """Compute the mean and the variance based on the generalized PCE coefficients
995
+ See DOI: 10.13140/RG.2.2.18085.58083/1
955
996
956
997
Parameters
957
998
----------
@@ -967,30 +1008,28 @@ def get_pce_stats(self, l_norm, pce_coefs, comb_coef):
967
1008
tuple with mean and variance based on the PCE coefficients
968
1009
"""
969
1010
970
- # Compute the PCE mean
971
- k1 = tuple (np .ones (self .N , dtype = int ))
972
- mean = 0.0
973
- for l in l_norm :
974
- mean = mean + comb_coef [tuple (l )] * pce_coefs [tuple (l )][k1 ]
1011
+ gen_pce_coefs = self .generalized_pce_coefs (l_norm , pce_coefs , comb_coef )
975
1012
1013
+ # with the generalized pce coefs, the standard PCE formulas for the mean and var
1014
+ # can be used for the dimension-adaptive PCE
1015
+
1016
+ # the PCE mean is just the 1st generalized PCE coef
1017
+ l1 = tuple (np .ones (self .N , dtype = int ))
1018
+ mean = gen_pce_coefs [l1 ]
1019
+
1020
+ # the variance is the sum of the squared generalized PCE coefs, excluding the 1st coef
976
1021
D = 0.0
977
- for k in l_norm [1 :]:
978
- var_k = 0.0
979
- for l in l_norm [1 :]:
980
- if tuple (k ) in pce_coefs [tuple (l )].keys ():
981
- eta_k = pce_coefs [tuple (l )][tuple (k )]
982
- var_k = var_k + comb_coef [tuple (l )] * eta_k
983
- var_k = var_k ** 2
984
- D = D + var_k
1022
+ for l in l_norm [1 :]:
1023
+ D += gen_pce_coefs [tuple (l )] ** 2
985
1024
986
- return mean , D
1025
+ return mean , D , gen_pce_coefs
987
1026
988
1027
def get_pce_sobol_indices (self , qoi , typ = 'first_order' , ** kwargs ):
989
1028
"""Computes Sobol indices using Polynomials Chaos coefficients. These
990
1029
coefficients are computed from the SC expansion via a transformation
991
1030
of basis (SC2PCE subroutine). This works better than computing the
992
1031
Sobol indices directly from the SC expansion in the case of the
993
- dimension-adaptive sampler.
1032
+ dimension-adaptive sampler. See DOI: 10.13140/RG.2.2.18085.58083/1
994
1033
995
1034
Method: J.D. Jakeman et al, "Adaptive multi-index collocation
996
1035
for uncertainty quantification and sensitivity analysis", 2019.
@@ -1021,27 +1060,9 @@ def get_pce_sobol_indices(self, qoi, typ='first_order', **kwargs):
1021
1060
samples = self .samples [qoi ]
1022
1061
N_qoi = self .N_qoi
1023
1062
1024
- # compute the PCE coefficients
1063
+ # compute the (generalized) PCE coefficients and stats
1025
1064
self .pce_coefs = self .SC2PCE (samples )
1026
-
1027
- # Compute the PCE mean (not really required)
1028
- k1 = tuple (np .ones (self .N , dtype = int ))
1029
- mean = 0.0
1030
- for l in self .l_norm :
1031
- mean = mean + self .comb_coef [tuple (l )] * self .pce_coefs [tuple (l )][k1 ]
1032
-
1033
- # dict to hold the variance per multi index k
1034
- var = {}
1035
- # D = total PCE variance
1036
- D = 0.0
1037
- for k in self .l_norm [1 :]:
1038
- var_k = 0.0
1039
- for l in self .l_norm [1 :]:
1040
- if tuple (k ) in self .pce_coefs [tuple (l )].keys ():
1041
- eta_k = self .pce_coefs [tuple (l )][tuple (k )]
1042
- var_k = var_k + self .comb_coef [tuple (l )] * eta_k
1043
- var [tuple (k )] = var_k ** 2
1044
- D = D + var [tuple (k )]
1065
+ mean , D , gen_pce_coefs = self .get_pce_stats (self .l_norm , self .pce_coefs , self .comb_coef )
1045
1066
1046
1067
logging .debug ('Computing Sobol indices...' )
1047
1068
# Universe = (0, 1, ..., N - 1)
@@ -1091,7 +1112,7 @@ def get_pce_sobol_indices(self, qoi, typ='first_order', **kwargs):
1091
1112
logging .debug ('Multi indices of dimension %s are %s' % (u , k ))
1092
1113
# the partial variance of u is the sum of all variances index by k
1093
1114
for k_u in k :
1094
- D_u [u ] = D_u [u ] + var [tuple (k_u )]
1115
+ D_u [u ] = D_u [u ] + gen_pce_coefs [tuple (k_u )] ** 2
1095
1116
1096
1117
# normalize D_u by total variance D to get the Sobol index
1097
1118
S_u [u ] = D_u [u ] / D
@@ -1284,13 +1305,12 @@ def get_uncertainty_amplification(self, qoi):
1284
1305
CV_out = np .mean (CV_out [idx ])
1285
1306
blowup = CV_out / CV_in
1286
1307
1287
- logging .debug ('-----------------' )
1288
- logging .debug ('Mean CV input = %.4f %%' % (100 * CV_in , ))
1289
- logging .debug ('Mean CV output = %.4f %%' % (100 * CV_out , ))
1290
- logging .debug (
1291
- 'Uncertainty amplification factor = %.4f/%.4f = %.4f' %
1308
+ print ('-----------------' )
1309
+ print ('Mean CV input = %.4f %%' % (100 * CV_in , ))
1310
+ print ('Mean CV output = %.4f %%' % (100 * CV_out , ))
1311
+ print ('Uncertainty amplification factor = %.4f/%.4f = %.4f' %
1292
1312
(CV_out , CV_in , blowup ))
1293
- logging . debug ('-----------------' )
1313
+ print ('-----------------' )
1294
1314
1295
1315
return blowup
1296
1316
0 commit comments