Commit 4760ec7: Fo and timesteps

1 parent 28f573e commit 4760ec7

4 files changed: +218 -47 lines changed

Examples_And_Papers/on_recovering_2nd_order_convergence_of_LBM_with_reaction_type_source_terms/process_df_dense2sparse.py

Lines changed: 13 additions & 12 deletions

@@ -18,8 +18,9 @@ def eat_dots_for_texmaker(value):
     s_value = re.sub(r"\.", 'o', s_value)
     return s_value

-# df = pd.read_pickle("./pickled_df_Da_1.00e+03_dense2sparse_samples_5.pkl")
-df = pd.read_pickle("./pickled_df_Da_1.00e-03_dense2sparse_samples_5.pkl")
+# df = pd.read_pickle("./pickled_df_Da_1.00e+00_dense2sparse_samples_5.pkl")
+df = pd.read_pickle("./pickled_df_Da_1.00e+03_dense2sparse_samples_5.pkl")
+# df = pd.read_pickle("./pickled_df_Da_1.00e-03_dense2sparse_samples_5.pkl")

 plot_dir = 'AC_plots_2D_epsx_epst_dense2sparse'
 if not os.path.exists(plot_dir):
@@ -88,26 +89,26 @@ def eat_dots_for_texmaker(value):
 l5 = ax3.loglog(filtered_df['CPU_cost'], filtered_df['err_L2'], linestyle="", color='black', marker=marker, markersize=10, label=scaling)


-ax1.set_xlim([1E-3, 1E-1])
-ax1.set_xticks([1E-3, 1E-2, 1E-1])
-ax1.set_ylim([1E-4,1E-1])
-# ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
+# ax1.set_xlim([1E-3, 1E-1])
+# ax1.set_xticks([1E-3, 1E-2, 1E-1])
+# ax1.set_ylim([1E-4,1E-1])
+########## ax1.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
 ax1.set(xlabel=r'$\Delta x$', ylabel=r'$\mathcal{L}_2 \left(\phi(\Delta x, \Delta t), \phi_{fine} \right)$')
 ax1.legend(loc='lower left', shadow=False, bbox_to_anchor=(0.525,0.01))
 ax1.grid(which='major')

-ax2.set_xticks([1E-5, 1E-4, 1E-3, 1E-2])
-ax2.set_xlim([1E-5, 1E-2])
-ax2.set_ylim([1E-4,1E-1])
+# ax2.set_xticks([1E-5, 1E-4, 1E-3, 1E-2])
+# ax2.set_xlim([1E-5, 1E-2])
+# ax2.set_ylim([1E-4,1E-1])
 ax2.set(xlabel=r'$\Delta t$') #, ylabel=r'$L_2(\phi(\delta t), \phi(\delta t_{min})$')
 ax2.legend(loc='lower left', shadow=False, bbox_to_anchor=(0.525,0.01))
 ax2.grid(which='major')
 ax2.set_yticklabels([]) # Turn off tick labels


-ax3.set_xlim([1E5, 1E10])
-ax3.set_xticks([1E5, 1E6, 1E7, 1E8, 1E9, 1E10])
-ax3.set_ylim([1E-4,1E-1])
+# ax3.set_xlim([1E5, 1E10])
+# ax3.set_xticks([1E5, 1E6, 1E7, 1E8, 1E9, 1E10])
+# ax3.set_ylim([1E-4,1E-1])
 ax3.set(xlabel=r'$CPU\; cost$')#, ylabel=r'$L_2(\phi(\delta t), \phi(\delta t_{min})$')
 ax3.legend(loc='lower left', shadow=False, bbox_to_anchor=(0.001,0.01))
 ax3.grid(which='major')
Examples_And_Papers/on_recovering_2nd_order_convergence_of_LBM_with_reaction_type_source_terms/run2D_AC_scalling_dense2sparse.py

Lines changed: 49 additions & 35 deletions
@@ -51,12 +51,25 @@ def eat_dots_for_texmaker(value):
 domain_size0 = 512 # initial size of the domain
 nsamples = 5 # number of resolutions

+
 initialPe = 1*5E2
-initialDa = 1E-3 # for initialDa in [1E-3, 1E0, 1E3]:
+initialDa = 1E3 # for initialDa in [1E-3, 1E0, 1E3]:
+initialFo = 1E-3
+
+tc = int(65536/1) # number of timesteps, tc = 65536 --> diffusivity0 = 4E-3
+diffusivity0 = initialFo*(domain_size0**2)/tc
+
+
+##################### tests

+# diffusivity0 = 1E-2
+# initialPe = 0*5E2
+# initialDa = 1E1 # for initialDa in [1E-3, 1E0, 1E3]:
+# initialFo = 1E-2
+
+print(f"diffusivity0 = {diffusivity0:.2e}")
+#####################

-tc = 65536 # number of timesteps for dt=1 aka Time
-diffusivity0 = 4E-3

 lambda_ph0 = initialDa*diffusivity0/domain_size0**2

@@ -68,17 +81,15 @@ def eat_dots_for_texmaker(value):
 df_for_plots_part1 = pd.DataFrame()
 df_for_plots_part2 = pd.DataFrame()

+def calc_sim_numbers(lambda_phi, L, M, Ux, n_iter):
+    Da = (lambda_phi * L**2) / M # Damkohler similarity number
+    Pe = Ux*L / M # Peclet similarity number (similar to Reynolds, but refers to advection-diffusion eq)
+    Fo = M*n_iter/(L**2) # Fourier similarity number
+
+    return Da,Pe,Fo
+
 for scaling in [acoustic_scaling, diffusive_scaling]:
 # for scaling in [acoustic_scaling]:
-    Ux0 = initialPe*diffusivity0/domain_size0 # macroscopic advection velocity
-
-    # check Da, Pe
-    Da0 = (lambda_ph0 * domain_size0**2) / \
-        diffusivity0 # Damkohler similarity number
-    # Peclet similarity number (similar to Reynolds, but refers to advection-diffusion eq)
-    Pe0 = Ux0*domain_size0 / diffusivity0
-    print(f"Initial Damköhler number: {Da0:.2e} \t Peclet number: {Pe0:.2e}")
-
     df_latex = pd.DataFrame()
     L2 = list()

@@ -106,19 +117,22 @@ def eat_dots_for_texmaker(value):
         lambda_ph = coeff * lambda_ph0 * lbdt
         # end of acoustic digression

-        Ux = Pe0*diffusivity/domain_size
+        Ux = initialPe*diffusivity/domain_size

         n_iterations = tc/lbdt
-        SI_time = n_iterations/(domain_size*domain_size/diffusivity)
-        print(f"running case {n}/{nsamples}, lbdt={lbdt}, lbdx={lbdx} SI_time={SI_time:.2e}, Ux={Ux:.2e} diffusivity={diffusivity:.2e} lambda_ph={lambda_ph:.2e} ")
-        assert_almost_equal((lambda_ph * domain_size**2) /
-                            diffusivity, Da0, decimal=6)
-        assert_almost_equal(Ux*domain_size/diffusivity, Pe0, decimal=6)
-        assert_almost_equal(math.modf(n_iterations)[
-                            0], 0, decimal=6) # check decimal places
+        # SI_time = n_iterations/(domain_size*domain_size/diffusivity) # this is just the Fo
+
+        # # check Da, Pe, Fo
+        Da, Pe, Fo = calc_sim_numbers(lambda_ph, domain_size, diffusivity, Ux, n_iterations)
+        print(f"\n=== Damköhler number: {Da:.2e} \t Peclet number: {Pe:.2e} \t Fourier number {Fo:.2e} ===\n")
+
+        print(f"running case {n}/{nsamples}, lbdt={lbdt}, lbdx={lbdx}, Ux={Ux:.2e} diffusivity={diffusivity:.2e} lambda_ph={lambda_ph:.2e} ")
+        assert_almost_equal((lambda_ph * domain_size**2) / diffusivity, Da, decimal=6)
+        assert_almost_equal(Ux*domain_size/diffusivity, Pe, decimal=6)
+        assert_almost_equal(math.modf(n_iterations)[0], 0, decimal=6) # check decimal places
         # check decimal places
         assert_almost_equal(math.modf(domain_size)[0], 0, decimal=6)
-
+
         def getXML(**kwars):
             # global idx

@@ -209,7 +223,7 @@ def getXML(**kwars):
             'n': n,
             'L': domain_size,
             'scaling': f'{scaling.__name__}',
-            'Da': Da0,
+            'Da': Da,
             'LBMdx': lbdx,
             'LBMdt': lbdt,
             'iteration_x_lbdt': data.iteration_x_lbdt,
@@ -225,7 +239,7 @@
             'n_iterations': int(n_iterations),
             # 'log2(LBMdx)': np.log2(lbdx),
             # 'log2(LBMdt)': np.log2(lbdt),
-            r'$time_{SI}$': SI_time,
+            r'$Fo$': Fo,
             # r'$\Lambda$': magic_parameter,
             # 'Da': int(Da),
             # 'Pe': int(Pe0),
@@ -239,11 +253,11 @@
             'L': domain_size,
             'n_iterations': n_iterations,
             'CPU_cost': domain_size*domain_size*n_iterations,
-            'Da': Da0,
-            'Pe': Pe0,
+            'Da': Da,
+            'Pe': Pe,
+            'Fo': Fo,
             'MagicParameter': magic_parameter,
             'scaling': f'{scaling.__name__}',
-            'SI_time': SI_time,
             'LBMdx': lbdx,
             'LBMdt': lbdt,
             'U': Ux,
@@ -298,8 +312,8 @@ def calc_L2(anal, num):
         fig.tight_layout() # otherwise the right y-label is slightly clipped
         plt.savefig(f"{plot_dir}/"
                     f"{scaling.__name__}_2D_phase_field_tc_{tc}"
-                    f"_Da_{eat_dots_for_texmaker(Da0)}"
-                    f"_Pe_{eat_dots_for_texmaker(Pe0)}"
+                    f"_Da_{eat_dots_for_texmaker(Da)}"
+                    f"_Pe_{eat_dots_for_texmaker(Pe)}"
                     ".png", dpi=300)
         plt.close(fig)

@@ -311,8 +325,8 @@ def calc_L2(anal, num):
         fig.tight_layout() # otherwise the right y-label is slightly clipped
         plt.savefig(f"{plot_dir}/"
                     f"{scaling.__name__}_2D_Q_tc_{tc}"
-                    f"_Da_{eat_dots_for_texmaker(Da0)}"
-                    f"_Pe_{eat_dots_for_texmaker(Pe0)}"
+                    f"_Da_{eat_dots_for_texmaker(Da)}"
+                    f"_Pe_{eat_dots_for_texmaker(Pe)}"
                     ".png", dpi=300)
         plt.close(fig)

@@ -323,8 +337,8 @@ def calc_L2(anal, num):
     #### PLOT CONVERGENCE ####
     fig_name = os.path.join(plot_dir,
                             f"{scaling.__name__}_2D_"
-                            f"_Da_{eat_dots_for_texmaker(Da0)}"
-                            f"_Pe_{eat_dots_for_texmaker(Pe0)}"
+                            f"_Da_{eat_dots_for_texmaker(Da)}"
+                            f"_Pe_{eat_dots_for_texmaker(Pe)}"
                             f"_diffusivity0_{eat_dots_for_texmaker(diffusivity)}"
                             f"_lambda_ph0_{eat_dots_for_texmaker(lambda_ph0)}"
                             )
@@ -405,14 +419,14 @@ def calc_L2(anal, num):

    print(f"SUMMARY:")
    print(df_latex.to_latex(index=False, escape=False,
-                           caption=f"Da = {Da0:.2e}, "+r"P{\'e}"+f" = {Pe0:.2e}"))
+                           caption=f"Da = {Da:.2e}, "+r"P{\'e}"+f" = {Pe:.2e}" + f", Fo = {Fo:.2e}"))

    pd.set_option('display.float_format', '{:.2E}'.format)
    original_stdout = sys.stdout # Save a reference to the original standard output
    with open(os.path.join(plot_dir, f"{scaling.__name__}_latex_table.txt"), 'a+') as f:
        sys.stdout = f # Change the standard output to the file we created.
        print(df_latex.to_latex(index=False, escape=False,
-                               caption=f"Da = {Da0:.2e}, "+r"P{\'e}"+f" = {Pe0:.2e}"))
+                               caption=f"Da = {Da:.2e}, "+r"P{\'e}"+f" = {Pe:.2e}" + f", Fo = {Fo:.2e}"))
        sys.stdout = original_stdout # Reset the standard output to its original value

    with pd.ExcelWriter(f"{fig_name}.xlsx") as writer: # doctest: +SKIP
@@ -428,7 +442,7 @@ def calc_L2(anal, num):

    # df_for_plots = df_for_plots.sort_values(by=['is3D', 'Collision_Kernel', 'D', 'BC_order'])
    df_for_plots_merged_inner.to_pickle(
-        f"./pickled_df_Da_{Da0:.2e}_dense2sparse_samples_{nsamples}.pkl")
+        f"./pickled_df_Da_{Da:.2e}_dense2sparse_samples_{nsamples}.pkl")
    print(df_for_plots_merged_inner)

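Note: the new constants are mutually consistent; the diffusivity is now derived so that the Fourier number comes out at initialFo, while Da and Pe keep their prescribed values. A minimal standalone sketch (using only the constants and formulas visible in the diff above, no LBM run):

```python
# Reproduce the similarity-number arithmetic from the committed constants.
domain_size0 = 512   # initial domain size [lu]
tc = 65536           # number of timesteps
initialFo = 1E-3
initialDa = 1E3
initialPe = 5E2

# diffusivity chosen so that Fo = M*tc/L**2 equals initialFo
diffusivity0 = initialFo*(domain_size0**2)/tc
print(f"diffusivity0 = {diffusivity0:.2e}")   # 4.00e-03, matching the comment in the diff

lambda_ph0 = initialDa*diffusivity0/domain_size0**2
Ux0 = initialPe*diffusivity0/domain_size0

# round-trip through the definitions used by calc_sim_numbers()
Da = (lambda_ph0 * domain_size0**2) / diffusivity0   # -> 1.00e+03
Pe = Ux0*domain_size0 / diffusivity0                 # -> 5.00e+02
Fo = diffusivity0*tc/(domain_size0**2)               # -> 1.00e-03
print(f"Da = {Da:.2e}, Pe = {Pe:.2e}, Fo = {Fo:.2e}")
```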

Lines changed: 43 additions & 0 deletions

@@ -0,0 +1,43 @@
+
+# setup - simulation
+MLBUps = 200 # single NVidia V100 (rysy) - 200 MLBUps on D3Q27Q27
+nGPUS = 100 # number of GPUs
+avg_velocity = 0.01 # [lattice units/iteration]
+domain_length = 512 # lattice units
+fluid_pass = 10 # fluid pass through the domain
+iterations = (1./avg_velocity)*domain_length*fluid_pass
+lattice_size = domain_length**3
+# lattice_size = 3*domain_length * domain_length/2 # consider a slice of minimal width: 3 lattice units
+wall_clock_time_s = iterations*lattice_size/(MLBUps*1E6*nGPUS)
+
+print(f"Wall clock time: {wall_clock_time_s/(3600*24):.2f} [days] on {nGPUS} GPUs; "  # wall_clock_time_s is in seconds; 3600*24 s per day
+      f"where domain is a {domain_length}^3 [lu] cube")
+
+
+# setup - storage
+DF_Bytes = 2*27*8 # D3Q27Q27 - double
+DF_temp_Bytes = DF_Bytes # temp array
+q_Bytes = 2*27*(1/4)*8 # IBB distance from intersection
+flag_Bytes = 4 # node type
+
+macroscopic_rock_porosity_field_Bytes = 8 # for isotropic, non-homogeneous Darcy model
+
+memory_per_lattice_node_MB = (DF_Bytes
+                              +DF_temp_Bytes
+                              +q_Bytes # without IBB
+                              # +macroscopic_rock_porosity_field_Bytes
+                              +flag_Bytes ) /1E6
+
+required_gpu_memory_MB = memory_per_lattice_node_MB*lattice_size
+print(f"Required GPU memory during simulation: {required_gpu_memory_MB/1E3:.2f} [GB]")
+
+macroscopic_velocity_field_Bytes = 3 * 8 # Vx,Vy,Vz
+macroscopic_fluid_density_field_Bytes = 8 # rho
+
+mem_per_node_MB = (macroscopic_velocity_field_Bytes
+                   + macroscopic_fluid_density_field_Bytes
+                   + macroscopic_rock_porosity_field_Bytes
+                   + flag_Bytes) / 1E6
+
+total_storage_size_MB = mem_per_node_MB*lattice_size
+print(f"Total HDD storage size for single snapshot: {total_storage_size_MB/1E3:.2f} [GB]")
Lines changed: 113 additions & 0 deletions

@@ -0,0 +1,113 @@
+import pandas as pd
+import numpy as np
+import matplotlib.pyplot as plt
+import matplotlib.ticker
+import matplotlib.pylab as pylab
+import os
+
+# df_Pr10 = pd.read_csv('df_Pr10.csv')
+# df_Pr100 = pd.read_csv('df_Pr100.csv')
+# df_Pr1000 = pd.read_csv('df_Pr1000.csv')
+
+new_df = pd.read_csv('EnhancedTable.csv')
+legend_df = pd.read_csv('EnhancedTableLegend.csv')
+
+
+
+df = new_df.transpose()
+case_sizes = df.iloc[0]
+df.rename(columns=case_sizes, inplace=True)
+df.drop(df.index[0], inplace=True)
+case_sizes = list(case_sizes)
+
+# case_sizes_set = case_sizes[:3] # Out[13]: ['Pr10_small', 'Pr10_medium', 'Pr10_large']
+# yminmax= [0, max(df[case_sizes_set[0]])]
+# fig_name = f'plots/kernel_bar_plot_Pr10'
+
+# case_sizes_set = case_sizes[3:6] # Out[10]: ['Pr100_small', 'Pr100_medium', 'Pr100_large']
+# yminmax= [0, max(df[case_sizes_set[0]])]
+# fig_name = f'plots/kernel_bar_plot_Pr100'
+
+case_sizes_set = case_sizes[6:] # Out[12]: ['Pr1000_small', 'Pr1000_medium', 'Pr1000_large']
+yminmax= [min(df[case_sizes_set[0]]), max(df[case_sizes_set[0]])]
+fig_name = f'plots/kernel_bar_plot_Pr1000'
+
+
+
+case_names = list(new_df.columns.values[1:])
+case_names = [w.replace('Nu_CM_HIGHER_1st_order_bc', '$CM-TRT_{1st\; order\; bc}$') for w in case_names]
+case_names = [w.replace('Nu_CM_HIGHER_2nd_order_bc', '$CM-TRT_{2nd\; order\; bc}$') for w in case_names]
+
+case_names = [w.replace('Nu_Cumulants_1st_order_bc', '$Cumulants-1^{st}_{1st\; order\; bc}$') for w in case_names]
+case_names = [w.replace('Nu_Cumulants_2nd_order_bc', '$Cumulants-1^{st}_{2nd\; order\; bc}$') for w in case_names]
+
+case_names = [w.replace('Nu_CM_1st_order_bc', '$CM-1^{st}_{1st\; order\; bc}$') for w in case_names]
+case_names = [w.replace('Nu_CM_2nd_order_bc', '$CM-1^{st}_{2nd\; order\; bc}$') for w in case_names]
+
+case_names = [w.replace('Nu_BGK_1st_order_bc', '$CM-SRT_{1st\; order\; bc}$') for w in case_names]
+case_names = [w.replace('Nu_BGK_2nd_order_bc', '$CM-SRT_{2nd\; order\; bc}$') for w in case_names]
+
+
+
+
+# ------------------------------------ PLOT ------------------------------------
+if not os.path.exists('plots'):
+    os.makedirs('plots')
+
+
+params = {'legend.fontsize': 'xx-large',
+          # 'figure.figsize': (14, 8),
+          'figure.figsize': (18, 9),
+          'axes.labelsize': 'xx-large',
+          'axes.titlesize':'xx-large',
+          'xtick.labelsize':'xx-large',
+          'ytick.labelsize':'xx-large'}
+pylab.rcParams.update(params)
+#
+
+bar_width = 0.75
+
+
+pos = np.arange(len(case_names))
+plt.subplot(1,3,1)
+for i in range(len(case_names)):
+    plt.bar(x=pos[i], height=df[case_sizes_set[0]][i], width=bar_width, label=case_names[i])
+plt.xticks([],[])
+plt.ylim(yminmax)
+plt.ylabel('Nu')
+plt.grid(True, axis='y')
+plt.title(case_sizes_set[0], fontsize=22)
+
+# The code below creates the second plot.
+plt.subplot(1,3,2)
+for i in range(len(case_names)):
+    plt.bar(x=pos[i], height=df[case_sizes_set[1]][i], width=bar_width, label=case_names[i])
+plt.xticks([], [])
+plt.ylim(yminmax)
+plt.grid(True, axis='y')
+plt.title(case_sizes_set[1], fontsize=22)
+
+# The code below creates the third plot.
+ax = plt.subplot(1,3,3)
+for i in range(len(case_names)):
+    plt.bar(x=pos[i], height=df[case_sizes_set[2]][i], width=bar_width, label=case_names[i])
+plt.xticks([],[])
+plt.ylim(yminmax)
+plt.grid(True, axis='y')
+plt.title(case_sizes_set[2], fontsize=22)
+
+
+fig = plt.gcf() # get current figure
+# fig.suptitle('Influence of kernel and BC on Nu number', fontsize=26)
+fig.tight_layout()
+fig.subplots_adjust(bottom=0.2)
+# https://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot
+ax.legend(loc='upper center', bbox_to_anchor=(-0.725, -0.05),
+          fancybox=True, shadow=True, ncol=5)
+plt.show()
+# plt.xlabel(r'$edge \, length \, [lu]}$', fontsize=22)
+# plt.ylabel(r'$mem \, to \, allocate\, [GB]$', fontsize=22)
+#
+fig.savefig(fig_name+'.png', bbox_inches='tight')
+fig.savefig(fig_name+'.pdf', bbox_inches='tight')
+plt.show()
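The reshaping at the top of this script is easy to misread, so here is a toy sketch (made-up values, hypothetical column names) of the transpose/rename/drop pattern it uses: the first CSV column supplies the case-size labels, and each remaining column (one per kernel) becomes a row.

```python
import pandas as pd

# Toy stand-in for EnhancedTable.csv: first column holds case-size labels,
# each remaining column holds one kernel's Nu values (numbers are made up).
new_df = pd.DataFrame({'case': ['Pr1000_small', 'Pr1000_large'],
                       'Nu_BGK_1st_order_bc': [8.1, 8.4],
                       'Nu_CM_2nd_order_bc': [8.3, 8.5]})

df = new_df.transpose()
case_sizes = df.iloc[0]              # first row = case-size labels
df.rename(columns=case_sizes, inplace=True)
df.drop(df.index[0], inplace=True)   # drop the label row itself
print(df['Pr1000_small'])            # one Nu value per kernel
```

Note that `df[case_sizes_set[0]][i]` with an integer `i` relies on positional fallback indexing of a string-indexed Series, which recent pandas releases deprecate; `df[case_sizes_set[0]].iloc[i]` is the future-proof spelling.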
