# varying_lambdas_with_scipy.py
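"""Sweep the gradient-matching weight (lambda) in a two-level collocation fit.

For each lambda, an outer CMA-ES search over the ODE parameters (thetas) is run;
every candidate theta triggers an inner scipy optimisation of the B-spline
coefficients (betas) that approximate the hidden model states. Synthetic data,
cost traces, fitted states and currents, and figures are saved per lambda.
"""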
# standard library
import os
import csv
import pickle as pkl
import time as tm
import multiprocessing as mp
from itertools import repeat
# third-party; explicit imports for the names used below (some are also re-exported by the star imports)
import numpy as np
import scipy as sp
import scipy.integrate  # ensures sp.integrate is available on older scipy versions
import pandas as pd
import matplotlib.pyplot as plt
import pints
# project modules
import inner_optimisation_scipy
from inner_optimisation_scipy import *
# from generate_data import *
import pints_classes
from pints_classes import *
# definitions
# set up variables for the simulation
tlim = [300, 14899]
times = np.linspace(*tlim, tlim[-1] - tlim[0], endpoint=False)
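# with these limits np.linspace yields 14599 samples at unit spacing: 300, 301, ..., 14898
# (the time unit is presumably ms, set by the pre-loaded protocol)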
voltage = V(times) # the voltage must be read at the same time points as the model output
del tlim
model_name = 'Kemp' # generative model name; 'HH', 'Kemp', and 'Wang' are handled below (see available_models)
snr_db = 20 # signal-to-noise ratio in dB
## set up the parameters for the fitted model
fitted_model = hh_model
state_names = ['a', 'r'] # names of the states in the model that we are fitting
## settings for the inner optimisation
upper_bound_beta = 0.9999 # upper bound for the betas
lambd = 10e5 # gradient-matching weight; placeholder only, overwritten by the sweep below
lambda_exps = [8, 7, 6, 1, 0] # exponents of the gradient-matching weights to sweep
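# i.e. the sweep below sets lambd to 1e8, 1e7, 1e6, 1e1, and 1e0 in turn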
# outer optimisation settings
inLogScale = True # is the search of thetas in log scale
convergence_threshold = 1e-6
iter_for_convergence = 20
max_iter_outer = 500
## rectangular boundaries of thetas from Clerx et al. - repeated for the two gating variables, plus one bound for the conductance
theta_lower_boundary = [np.log(10e-5), np.log(10e-5), np.log(10e-5), np.log(10e-5), np.log(110e-5), np.log(10e-5),
np.log(10e-5), np.log(10e-5), np.log(10e-3)]
theta_upper_boundary = [np.log(10e3), np.log(0.4), np.log(10e3), np.log(0.4), np.log(10e3), np.log(0.4),
np.log(10e3), np.log(0.4), np.log(10)]
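# note on the float literals above: 10e-5 == 1e-4, 10e-3 == 1e-2, and 10e3 == 1e4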
# parallelisation settings
ncpu = mp.cpu_count()
ncores = 16
####################################################################################################################
### from this point no user changes are required
####################################################################################################################
# load the protocols
load_protocols
# generate the segments with B-spline knots and initialise the betas for the splines
jump_indices, times_roi, voltage_roi, knots_roi, collocation_roi, spline_order = generate_knots(times)
jumps_odd = jump_indices[0::2]
jumps_even = jump_indices[1::2]
nSegments = len(jump_indices) - 1
print('Inner optimisation is split into ' + str(nSegments) + ' segments based on protocol steps.')
nBsplinesOneState = (len(knots_roi[0]) - spline_order - 1)
nBsplineCoeffs = nBsplinesOneState * len(state_names)
init_betas_roi = [0.5 * np.ones(nBsplineCoeffs) for _ in range(nSegments)] # independent array per segment
print('Number of B-spline coeffs per segment: ' + str(nBsplineCoeffs) + '.')
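# quick sanity check: each segment's initial beta vector must match the coefficient count
assert all(len(b) == nBsplineCoeffs for b in init_betas_roi), 'init_betas_roi does not match nBsplineCoeffs'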
# collect B-spline settings into a list to pass into the inner optimisation
bspline_settings = [nBsplineCoeffs, spline_order, nSegments, state_names]
# set variables definitions for inner optimisation module
inner_optimisation_scipy.nSegments = nSegments
inner_optimisation_scipy.state_names = state_names
inner_optimisation_scipy.spline_order = spline_order
inner_optimisation_scipy.nBsplineCoeffs = nBsplineCoeffs
inner_optimisation_scipy.fitted_model = fitted_model
# main
if __name__ == '__main__':
debug_opt = False
# generate synthetic data
if model_name.lower() not in available_models:
raise ValueError(f'Unknown model name: {model_name}. Available models are: {available_models}')
elif model_name.lower() == 'hh':
thetas_true = thetas_hh_baseline
elif model_name.lower() == 'kemp':
thetas_true = thetas_kemp # the last parameter is the conductance
elif model_name.lower() == 'wang':
thetas_true = thetas_wang # the last parameter is the conductance
Thetas_ODE = thetas_true
param_names = [f'p_{i}' for i in range(1, len(Thetas_ODE) + 1)]
solution, current_model = generate_synthetic_data(model_name, thetas_true, times)
states_true = solution.sol(times) # get the true states generated by the model
snr = 10 ** (snr_db / 10) # convert the signal to noise ratio to linear scale
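# e.g. snr_db = 20 gives snr = 100, i.e. signal power one hundred times the noise power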
current_true = current_model(times, solution, thetas_true, snr=snr) # get the true current
states_roi, states_known_roi, current_roi = split_generated_data_into_segments(solution, current_true, jump_indices,
times)
print('Produced synthetic data for the ' + model_name + ' model based on the pre-loaded voltage protocol.')
####################################################################################################################
# loop over lambda values
for weight in lambda_exps:
lambd = 10 ** weight
print('===========================================================================================================')
print('Starting optimisation for lambda: {:.2E}'.format(lambd) + '.') # print the current weight
####################################################################################################################
# set up table for saving results
# gen_model means generative model - we are denoting which model we are using to generate the data
folderName = 'Results_gen_model_' + model_name + '_lambda_' + str(int(lambd))
if not os.path.exists(folderName):
os.makedirs(folderName)
# save the data to a pickle file
with open(folderName +'/synthetic_data.pkl', 'wb') as f:
pkl.dump([times, voltage, current_true, states_true, thetas_true, knots_roi, snr_db], f)
####################################################################################################################
# set up boundaries for thetas and initialise depending on the scale of search
if inLogScale:
# theta in log scale
init_thetas = np.log(thetas_hh_baseline) # start at the HH baseline values to see how quickly the search converges
sigma0_thetas = 0.1 * np.ones_like(init_thetas)
# boundaries_thetas = pints.RectangularBoundaries(theta_lower_boundary, theta_upper_boundary) # rectangular boundaries
boundaries_thetas = BoundariesTwoStates() # boundaries accounting for the reaction rates
else:
# theta in decimal scale
init_thetas = 0.001 * np.ones_like(thetas_hh_baseline)
sigma0_thetas = 0.0005 * np.ones_like(init_thetas)
boundaries_thetas = pints.RectangularBoundaries(np.exp(theta_lower_boundary), np.exp(theta_upper_boundary))
Thetas_ODE = init_thetas.copy()
####################################################################################################################
## create pints objects for the outer optimisation
model_segments = SegmentOutput()
## create the problem of comparing the modelled current with the measured current
values_to_match_output = np.transpose(np.array([current_true, voltage]))
# ^ only the current is actually matched, but pints requires the values array to have one column per model output
problem_outer = pints.MultiOutputProblem(model=model_segments, times=times,
values=values_to_match_output)
## associate the cost with it
error_outer = OuterCriterion(problem=problem_outer)
error_outer_no_model = OuterCriterionNoModel(problem=problem_outer) # test if running without model is faster
## create the optimiser
optimiser_outer = pints.CMAES(x0=init_thetas, sigma0=sigma0_thetas, boundaries=boundaries_thetas)
optimiser_outer.set_population_size(min(len(Thetas_ODE) * 7, 30)) # restrict the population size to at most 30
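# e.g. with 9 thetas (as for the HH model here) this requests min(63, 30) = 30 samples per iteration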
####################################################################################################################
# take 1: loosely based on ask-tell example from pints
## Run optimisation
theta_visited = []
theta_guessed = []
f_guessed = []
theta_best = []
f_outer_best = []
f_inner_best = []
f_gradient_best = []
InnerCosts_all = []
OuterCosts_all = []
GradCost_all = []
# create a logger file
csv_file_name = folderName + '/iterations_both_states.csv'
theta_names = ['Theta_' + str(i) for i in range(len(Thetas_ODE))] # create names for all thetas
column_names = ['Iteration', 'Walker'] + theta_names + ['Inner Cost', 'Outer Cost', 'Gradient Cost']
####################################################################################################################
# open the file to write to
inner_optimisation_scipy.voltage = voltage # send the voltage to the inner optimisation module
inner_optimisation_scipy.current_true = current_true # send the current to the inner optimisation module
big_tic = tm.time()
with open(csv_file_name, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(column_names)
# run the outer optimisation
for i in range(max_iter_outer):
# get the next points (multiple locations)
thetas = optimiser_outer.ask()
# deal with the log scale
if inLogScale:
# convert thetas to decimal scale for inner optimisation
thetasForInner = np.exp(thetas)
else:
thetasForInner = thetas
# create the placeholder for cost functions
OuterCosts = []
InnerCosts = []
GradCosts = []
betas_visited = []
# for each theta in the sample
tic = tm.time()
# run inner optimisation for each theta sample
with mp.get_context('fork').Pool(processes=min(ncpu, ncores)) as pool:
results = pool.starmap(inner_optimisation,
zip(thetasForInner, repeat(lambd), repeat(times_roi), repeat(voltage_roi), repeat(current_roi),
repeat(knots_roi), repeat(collocation_roi)))
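# zip + repeat builds one argument tuple per candidate theta while sharing the fixed
# data (lambda, time/voltage/current segments, knots, collocation points) across workers;
# the 'fork' start method lets workers inherit the module-level state set on
# inner_optimisation_scipy above (note that 'fork' is unavailable on Windows)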
# results is a list of tuples, one per theta sample
# extract the results
for iSample, result in enumerate(results):
betas_sample, inner_cost_sample, data_cost_sample, grad_cost_sample, state_fitted_at_sample = result
# get the states at this sample
state_all_segments = np.array(state_fitted_at_sample)
pints_classes.state_all_segments = state_all_segments # share the states with the pints_classes module
# store the costs
InnerCosts.append(inner_cost_sample)
OuterCosts.append(data_cost_sample)
GradCosts.append(grad_cost_sample)
betas_visited.append(betas_sample)
# tell the optimiser about the costs
optimiser_outer.tell(OuterCosts)
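# CMA-ES ask/tell: ask() proposed the sample of thetas above, tell() feeds back the
# corresponding outer costs so the optimiser can update its search distribution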
# store the best point
index_best = OuterCosts.index(min(OuterCosts))
theta_best.append(thetas[index_best, :])
f_outer_best.append(OuterCosts[index_best])
f_inner_best.append(InnerCosts[index_best])
f_gradient_best.append(GradCosts[index_best])
betas_best = betas_visited[index_best]
# ad hoc fix for the optimiser getting stuck at the upper boundary of the betas
init_betas_roi = []
for betas_to_initialise in betas_best:
if any(betas_to_initialise >= upper_bound_beta):
# find the indices of the offending betas and nudge them away from the bound
index = np.where(betas_to_initialise >= upper_bound_beta)[0]
betas_to_initialise[index] = upper_bound_beta * 0.9
init_betas_roi.append(betas_to_initialise)
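# init_betas_roi now warm-starts the next iteration's inner optimisation from the
# best betas found so far, nudged away from the upper bound where necessary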
# store the costs for all samples in the iteration
InnerCosts_all.append(InnerCosts)
OuterCosts_all.append(OuterCosts)
GradCost_all.append(GradCosts)
# store the visited points
theta_visited.append(thetas)
# theta_guessed.append(optimiser_outer.guess())
# f_guessed.append(optimiser_outer.guesses())
# print the results
print('Iteration: ', i)
print('Best parameters: ', theta_best[-1])
print('Best objective: ', f_outer_best[-1])
print('Mean objective: ', np.mean(OuterCosts))
print('Inner objective at best sample: ', f_inner_best[-1])
print('Gradient objective at best sample: ', f_gradient_best[-1])
print('Time elapsed: ', tm.time() - tic)
# write the results to a csv file
for iWalker in range(len(thetas)):
row = [i, iWalker] + list(thetas[iWalker]) + [InnerCosts[iWalker], OuterCosts[iWalker],
GradCosts[iWalker]]
writer.writerow(row)
file.flush()
# check for convergence
if (i > iter_for_convergence):
# check how the cost increment changed over the last 10 iterations
d_cost = np.diff(f_outer_best[-iter_for_convergence:])
# if all incrementa are below a threshold break the loop
if all(d <= convergence_threshold for d in d_cost):
print("No changes in" + str(iter_for_convergence) + "iterations. Terminating")
break
## end convergence check condition
## end loop over iterations
big_toc = tm.time()
# convert the lists to numpy arrays
theta_best = np.array(theta_best)
f_outer_best = np.array(f_outer_best)
f_inner_best = np.array(f_inner_best)
f_gradient_best = np.array(f_gradient_best)
print('Total time taken: ', big_toc - big_tic)
print('===========================================================================================================')
####################################################################################################################
## save the best betas as a table to a csv file in the results folder
df_betas = pd.DataFrame()
for i, beta in enumerate(init_betas_roi):
df_betas['segment_' + str(i)] = beta
df_betas.to_csv(folderName + '/best_betas_both_states.csv', index=False)
####################################################################################################################
## simulate the optimised model using B-splines
Thetas_ODE = theta_best[-1]
# don't forget to convert to decimal scale if the search was done in log scale
if inLogScale:
# convert thetas to decimal scale for the final inner optimisation run
Thetas_ODE = np.exp(Thetas_ODE)
test_output = inner_optimisation(Thetas_ODE, lambd, times_roi, voltage_roi, current_roi, knots_roi,
collocation_roi)
betas_sample, inner_cost_sample, data_cost_sample, grad_cost_sample, state_fitted_at_sample = test_output
state_all_segments = np.array(state_fitted_at_sample)
current_all_segments = observation_direct_input(state_all_segments, voltage, Thetas_ODE)
# get the derivative and the RHS
rhs_of_roi = {key: [] for key in state_names}
deriv_of_roi = {key: [] for key in state_names}
for iSegment in range(nSegments):
model_output_fit = simulate_segment(betas_sample[iSegment], times_roi[iSegment], knots_roi[iSegment],
first_spline_coeffs=None)
state_at_sample, state_deriv_at_sample, rhs_at_sample = np.split(model_output_fit, 3, axis=1)
if iSegment == 0:
index_start = 0 # from which timepoint to store the states
else:
index_start = 1 # from which timepoint to store the states
for iState, stateName in enumerate(state_names):
deriv_of_roi[stateName] += list(state_deriv_at_sample[index_start:, iState])
rhs_of_roi[stateName] += list(rhs_at_sample[index_start:, iState])
## end of loop over segments
## simulate the model using the best thetas and the ODE model used
x0_optimised_ODE = state_all_segments[:, 0]
solution_optimised = sp.integrate.solve_ivp(fitted_model, [0, times[-1]], x0_optimised_ODE, args=[Thetas_ODE],
dense_output=True, method='LSODA', rtol=1e-8, atol=1e-8)
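# LSODA switches automatically between stiff and non-stiff methods; the tight
# rtol/atol keep this ODE solution accurate as the reference for the error plots below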
states_optimised_ODE = solution_optimised.sol(times)
current_optimised_ODE = observation_direct_input(states_optimised_ODE, voltage, Thetas_ODE)
####################################################################################################################
## create figures so you can populate them with the data
fig, axes = plt.subplot_mosaic([['a)'], ['b)'], ['c)']], layout='constrained', sharex=True)
y_labels = ['I'] + state_names
for _, ax in axes.items():
for iSegment, SegmentStart in enumerate(jumps_odd):
ax.axvspan(times[SegmentStart], times[jumps_even[iSegment]], facecolor='0.2', alpha=0.1)
# axes['a)'].plot(times, current_true, '-k', label=r'Current true', linewidth=1.5, alpha=0.5)
fig1, axes1 = plt.subplot_mosaic([['a)', 'a)'], ['b)', 'c)'], ['d)', 'e)']], layout='constrained')
y_labels1 = ['I_{true} - I_{model}', 'da(C) - RHS(C)', 'dr(C) - RHS(C)',
'a - Phi C_a', 'r - Phi C_r']
for _, ax in axes1.items():
for iSegment, SegmentStart in enumerate(jumps_odd):
ax.axvspan(times[SegmentStart], times[jumps_even[iSegment]], facecolor='0.2', alpha=0.1)
# add the fits to the plots - compare the modelled current with the true current
axes['a)'].plot(times, current_all_segments, '-c',
label=r'Current from B-spline approx.', linewidth=1, alpha=0.7)
axes['a)'].plot(times, current_optimised_ODE, ':m',
label=r'Current from ODE solution', linewidth=1, alpha=0.7)
# axes['a)'].set_xlim(times_of_segments[0], times_of_segments[-1])
# axes['a)'].set_xlim(1890, 1920)
axes['b)'].plot(times, state_all_segments[0, :], '-c',
label=r'B-spline approx. at $\lambda$ = ' + str(int(lambd)),
linewidth=1, alpha=0.7)
axes['c)'].plot(times, state_all_segments[1, :], '-c',
label=r'B-spline approx. at $\lambda$ = ' + str(int(lambd)),
linewidth=1, alpha=0.7)
axes['b)'].plot(times, states_optimised_ODE[0, :], ':m',
label=r'Fitted ODE solution at $\lambda$ = ' + str(int(lambd)),
linewidth=1, alpha=0.7)
axes['c)'].plot(times, states_optimised_ODE[1, :], ':m',
label=r'Fitted ODE solution at $\lambda$ = ' + str(int(lambd)),
linewidth=1, alpha=0.7)
axes1['a)'].plot(times, current_all_segments - current_true, '--c', label=r'Current from B-spline approx.',
linewidth=1, alpha=0.7)
axes1['a)'].plot(times, current_optimised_ODE - current_true, ':m',
label=r'Current from ODE solution', linewidth=1, alpha=0.7)
axes1['b)'].plot(times, np.array(rhs_of_roi[state_names[0]]) - np.array(deriv_of_roi[state_names[0]]),
'--k', label=r'Gradient matching error at $\lambda$ = ' + str(int(lambd)), linewidth=1,
alpha=0.7)
axes1['c)'].plot(times, np.array(rhs_of_roi[state_names[1]]) - np.array(deriv_of_roi[state_names[1]]),
'--k', label=r'Gradient matching at $\lambda$ = ' + str(int(lambd)), linewidth=1, alpha=0.7)
axes1['d)'].plot(times, state_all_segments[0, :] - states_optimised_ODE[0, :], '--k',
label=r'B-spline approx. error at $\lambda$ = ' + str(int(lambd)), linewidth=1, alpha=0.7)
axes1['e)'].plot(times, state_all_segments[1, :] - states_optimised_ODE[1, :], '--k',
label=r'B-spline approx. error at $\lambda$ = ' + str(int(lambd)), linewidth=1, alpha=0.7)
## style the axes and save the figures
for iAx, ax in enumerate(axes.values()):
ax.set_ylabel(y_labels[iAx], fontsize=12)
ax.set_facecolor('white')
ax.grid(which='major', color='grey', linestyle='solid', alpha=0.2, linewidth=1)
ax.legend(fontsize=12, loc='best')
# plt.tight_layout(pad=0.3)
fig.savefig(folderName + '/states_model_output.png', dpi=400)
for iAx, ax in enumerate(axes1.values()):
ax.set_ylabel(y_labels1[iAx], fontsize=12)
ax.set_facecolor('white')
ax.grid(which='major', color='grey', linestyle='solid', alpha=0.2, linewidth=1)
ax.legend(fontsize=12, loc='best')
# plt.tight_layout(pad=0.3)
fig1.savefig(folderName + '/errors_model_output.png', dpi=400)
####################################################################################################################
# plot evolution of inner costs
plt.figure(figsize=(10, 6))
plt.semilogy()
plt.xlabel('Iteration')
plt.ylabel('Inner optimisation cost')
for iIter in range(len(f_outer_best) - 1):
plt.scatter(iIter * np.ones(len(InnerCosts_all[iIter])), InnerCosts_all[iIter], c='k', marker='.', alpha=.5,
linewidths=0)
# plot the last iteration separately so it carries the legend label
iIter = len(f_outer_best) - 1
plt.scatter(iIter * np.ones(len(InnerCosts_all[iIter])), InnerCosts_all[iIter], c='k', marker='.', alpha=.5,
linewidths=0,
label='Sample cost min: J(C / Theta, Y) = ' + "{:.5e}".format(min(InnerCosts_all[iIter])))
plt.plot(f_inner_best, '-b', linewidth=1.5,
label='Best cost: J(C / Theta_{best}, Y) = ' + "{:.5e}".format(f_inner_best[-1]))
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(folderName + '/inner_cost_evolution.png', dpi=400)
# plot evolution of outer costs
plt.figure(figsize=(10, 6))
plt.semilogy()
plt.xlabel('Iteration')
plt.ylabel('Outer optimisation cost')
for iIter in range(len(f_outer_best) - 1):
plt.scatter(iIter * np.ones(len(OuterCosts_all[iIter])), OuterCosts_all[iIter], c='k', marker='.', alpha=.5,
linewidths=0)
# plot the last iteration separately so it carries the legend label
iIter = len(f_outer_best) - 1
plt.scatter(iIter * np.ones(len(OuterCosts_all[iIter])), OuterCosts_all[iIter], c='k', marker='.', alpha=.5,
linewidths=0, label='Sample cost: H(Theta / C, Y)')
plt.plot(f_outer_best, '-b', linewidth=1.5,
label='Best cost: H(Theta_{best} / C, Y) = ' + "{:.5e}".format(f_outer_best[-1]))
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(folderName + '/outer_cost_evolution.png', dpi=400)
# plot evolution of gradient matching costs
plt.figure(figsize=(10, 6))
plt.semilogy()
plt.xlabel('Iteration')
plt.ylabel('Gradient matching cost')
for iIter in range(len(f_gradient_best) - 1):
plt.scatter(iIter * np.ones(len(GradCost_all[iIter])), GradCost_all[iIter], c='k', marker='.', alpha=.5,
linewidths=0)
# plot the last iteration separately so it carries the legend label
iIter = len(f_gradient_best) - 1
plt.scatter(iIter * np.ones(len(GradCost_all[iIter])), GradCost_all[iIter], c='k', marker='.', alpha=.5,
linewidths=0, label='Sample cost: G_{ODE}(C / Theta, Y)')
# plt.plot(range(len(f_gradient_best)), np.ones(len(f_gradient_best)) * GradCost_given_true_theta, '--m', linewidth=2.5, alpha=.5,label='Collocation solution: G_{ODE}( C / Theta_{true}, Y) = ' + "{:.5e}".format(
# GradCost_given_true_theta))
plt.plot(f_gradient_best, '-b', linewidth=1.5,
label='Best cost: G_{ODE}(C / Theta, Y) = ' + "{:.5e}".format(f_gradient_best[-1]))
plt.legend(loc='best')
plt.tight_layout()
plt.savefig(folderName + '/gradient_cost_evolution.png', dpi=400)
# plot parameter values in the (log) scale used for the search
fig, axes = plt.subplots(len(Thetas_ODE), 1, figsize=(3 * len(Thetas_ODE), 16), sharex=True)
for iAx, ax in enumerate(axes.flatten()):
for iIter in range(len(theta_best)):
x_visited_iter = theta_visited[iIter][:, iAx]
ax.scatter(iIter * np.ones(len(x_visited_iter)), x_visited_iter, c='k', marker='.', alpha=.2,
linewidth=0)
# ax.plot(range(iIter+1),np.ones(iIter+1)*theta_true[iAx], '--m', linewidth=2.5,alpha=.5, label=r"true: log("+param_names[iAx]+") = " +"{:.6f}".format(theta_true[iAx]))
# ax.plot(theta_guessed[:,iAx],'--r',linewidth=1.5,label=r"guessed: $\theta_{"+str(iAx+1)+"} = $" +"{:.4f}".format(theta_guessed[-1,iAx]))
ax.plot(theta_best[:, iAx], '-m', linewidth=1.5,
label=r"best: log(" + param_names[iAx] + ") = " + "{:.6f}".format(theta_best[-1, iAx]))
ax.set_ylabel('log(' + param_names[iAx] + ')')
ax.legend(loc='best')
plt.tight_layout()
plt.savefig(folderName + '/ODE_params_log_scale.png', dpi=400)
# plot parameter values converting from log scale to decimal
fig, axes = plt.subplots(len(Thetas_ODE), 1, figsize=(3 * len(Thetas_ODE), 16), sharex=True)
for iAx, ax in enumerate(axes.flatten()):
for iIter in range(len(theta_best)):
x_visited_iter = theta_visited[iIter][:, iAx]
ax.scatter(iIter * np.ones(len(x_visited_iter)), np.exp(x_visited_iter), c='k', marker='.', alpha=.2,
linewidth=0)
# ax.plot(range(iIter+1),np.ones(iIter+1)*np.exp(theta_true[iAx]), '--m', linewidth=2.5,alpha=.5, label="true: "+param_names[iAx]+" = " +"{:.6f}".format(np.exp(theta_true[iAx])))
# ax.plot(np.exp(theta_guessed[:,iAx]),'--r',linewidth=1.5,label="guessed: $a_{"+str(iAx+1)+"} = $" +"{:.4f}".format(np.exp(theta_guessed[-1,iAx])))
ax.plot(np.exp(theta_best[:, iAx]), '-m', linewidth=1.5,
label="best: " + param_names[iAx] + " = " + "{:.6f}".format(np.exp(theta_best[-1, iAx])))
ax.set_ylabel(param_names[iAx])
ax.set_yscale('log')
ax.legend(loc='best')
ax.set_xlabel('Iteration')
plt.tight_layout()
plt.savefig(folderName + '/ODE_params.png', dpi=400)