diff --git a/.zenodo.json b/.zenodo.json index 56c2a93ea..694cd431b 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -1,6 +1,5 @@ { - "metadata": { - "description": "Elliptical Parcel-in-Cell model for fluid dynamics", + "description": "The Elliptical Parcel-In-Cell (EPIC) model for fluid dynamics", "access_right": "open", "upload_type": "software", "keywords": ["PIC", @@ -24,6 +23,11 @@ "affiliation": "University of St Andrews", "name": "Dritschel, David", "orcid": "0000-0001-6489-3395" + }, + { + "affiliation": "Edinburgh Parallel Computing Centre (EPCC)", + "name": "Apóstolo, Rui", + "orcid": "0000-0003-1161-9098" } ], "grants": [ @@ -34,5 +38,4 @@ "id": "10.13039/100014013::EP/T025409/1" } ] - } } diff --git a/CHANGELOG.md b/CHANGELOG.md index b898cfd13..cb068192b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,19 @@ +## EPIC version 0.14.1 +* [Added/changed ifort style flags](https://github.com/EPIC-model/epic/pull/494) +* [Fix .zenodo.json](https://github.com/EPIC-model/epic/pull/542) +* [Generalise configure.ac](https://github.com/EPIC-model/epic/pull/492) +* [Selective parcel attribute writes](https://github.com/EPIC-model/epic/pull/543) +* [Need to calculate buoyancy extrema for total buoyancy](https://github.com/EPIC-model/epic/pull/546) +* [Update writing statistics to improved interface](https://github.com/EPIC-model/epic/pull/545) +* [Fix rk_timer](https://github.com/EPIC-model/epic/pull/547) +* [Collect merger statistics](https://github.com/EPIC-model/epic/pull/544) +* [Update conda environment](https://github.com/EPIC-model/epic/pull/550) +* [Damping implementation](https://github.com/EPIC-model/epic/pull/549) +* [Add missing stop_timer; fix resetting parcel split and merge diagnostics](https://github.com/EPIC-model/epic/pull/552) +* [Fix loop for nearest parcel check](https://github.com/EPIC-model/epic/pull/555) +* [Gridded strain](https://github.com/EPIC-model/epic/pull/553) +* [Ensure link exists before calling netcdf 
routine](https://github.com/EPIC-model/epic/pull/558) + ## EPIC version 0.14.0 * [Fix the write frequency](https://github.com/EPIC-model/epic/pull/527) * [Add boundary surface fluxes](https://github.com/EPIC-model/epic/pull/451) diff --git a/configure.ac b/configure.ac index f6fd1c184..02631d328 100644 --- a/configure.ac +++ b/configure.ac @@ -1,4 +1,4 @@ -AC_INIT([epic], [0.14.0], [mf248@st-andrews.ac.uk], [], [https://github.com/matt-frey/epic]) +AC_INIT([epic], [0.14.1], [mf248@st-andrews.ac.uk], [], [https://github.com/matt-frey/epic]) AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects]) AC_PROG_FC([gfortran]) AC_LANG(Fortran) @@ -8,8 +8,7 @@ LT_INIT # change file extension from *.f to *.f90 # (important for library tests since it autogenerates a file conftest.f90) AC_FC_SRCEXT(f90) - -FCFLAGS="-std=f2018 -fdefault-real-8 -fdefault-double-8 -cpp -mcmodel=large -fall-intrinsics" +AC_FC_PP_SRCEXT(f90) AC_CONFIG_MACRO_DIRS([m4]) AC_CONFIG_HEADERS([src/config.h]) @@ -41,6 +40,31 @@ AC_CONFIG_FILES([ run-script/Makefile ]) +# 30 May 2023 +# https://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +AX_COMPILER_VENDOR + +if test "$ax_cv_fc_compiler_vendor" = "unknown"; then + AC_MSG_ERROR([Could not deduce compiler vendor.]) +fi + +AX_CHECK_COMPILE_FLAG([-cpp], [FCFLAGS="$FCFLAGS -cpp"]) +AX_CHECK_COMPILE_FLAG([-mcmodel=large], [FCFLAGS="$FCFLAGS -mcmodel=large"]) + +# gfortran compiler flags +AX_CHECK_COMPILE_FLAG([-std=f2018], [FCFLAGS="$FCFLAGS -std=f2018"]) +AX_CHECK_COMPILE_FLAG([-fall-intrinsics], [FCFLAGS="$FCFLAGS -fall-intrinsics"]) +AX_CHECK_COMPILE_FLAG([-fdefault-real-8], [FCFLAGS="$FCFLAGS -fdefault-real-8"]) +AX_CHECK_COMPILE_FLAG([-fdefault-double-8], [FCFLAGS="$FCFLAGS -fdefault-double-8"]) + +# intel compiler flags +AX_CHECK_COMPILE_FLAG([-std18], [FCFLAGS="$FCFLAGS -std18"]) +AX_CHECK_COMPILE_FLAG([-fp-model=source], [FCFLAGS="$FCFLAGS -fp-model=source"]) 
+AX_CHECK_COMPILE_FLAG([-real-size 8], [FCFLAGS="$FCFLAGS -real-size 8"]) +AX_CHECK_COMPILE_FLAG([-double-size 64], [FCFLAGS="$FCFLAGS -double-size 64"]) + + ####################################################################################### ## ## "--with" flags @@ -468,11 +492,36 @@ AM_CONDITIONAL([ENABLE_DEBUG], [test "$ENABLE_DEBUG" = "yes"]) AC_MSG_CHECKING([whether we are compiling in debug mode]) if test "x$ENABLE_DEBUG" = "xyes"; then AC_MSG_RESULT([yes]) - FCFLAGS="$FCFLAGS -Wall -Wuninitialized -Wmaybe-uninitialized -Werror -g -O0" - FCFLAGS="$FCFLAGS -fcheck=all -fbounds-check -fbacktrace -ffpe-trap=denormal,invalid,zero,overflow,underflow" + AX_CHECK_COMPILE_FLAG([-g], [FCFLAGS="$FCFLAGS -g"]) + AX_CHECK_COMPILE_FLAG([-O0], [FCFLAGS="$FCFLAGS -O0"]) + + # gfortran compiler flags + AX_CHECK_COMPILE_FLAG([-Wall], [FCFLAGS="$FCFLAGS -Wall"]) + AX_CHECK_COMPILE_FLAG([-Wuninitialized], [FCFLAGS="$FCFLAGS -Wuninitialized"]) + AX_CHECK_COMPILE_FLAG([-Wmaybe-uninitialized], [FCFLAGS="$FCFLAGS -Wmaybe-uninitialized"]) + AX_CHECK_COMPILE_FLAG([-Werror], [FCFLAGS="$FCFLAGS -Werror"]) + AX_CHECK_COMPILE_FLAG([-fcheck=all], [FCFLAGS="$FCFLAGS -fcheck=all"]) + AX_CHECK_COMPILE_FLAG([-fbounds-check], [FCFLAGS="$FCFLAGS -fbounds-check"]) + AX_CHECK_COMPILE_FLAG([-fbacktrace], [FCFLAGS="$FCFLAGS -fbacktrace"]) + AX_CHECK_COMPILE_FLAG([-ffpe-trap=denormal,invalid,zero,overflow,underflow], + [FCFLAGS="$FCFLAGS -ffpe-trap=denormal,invalid,zero,overflow,underflow"]) + + # intel compiler flags + AX_CHECK_COMPILE_FLAG([-warn all], [FCFLAGS="$FCFLAGS -Wall"]) + AX_CHECK_COMPILE_FLAG([-warn error], [FCFLAGS="$FCFLAGS -warn error"]) + AX_CHECK_COMPILE_FLAG([-debug full], [FCFLAGS="$FCFLAGS -debug full"]) else AC_MSG_RESULT([no]) - FCFLAGS="$FCFLAGS -O3 -funroll-all-loops -flto -DNDEBUG" + AX_CHECK_COMPILE_FLAG([-O3], [FCFLAGS="$FCFLAGS -O3"]) + AX_CHECK_COMPILE_FLAG([-DNDEBUG], [FCFLAGS="$FCFLAGS -DNDEBUG"]) + + # gfortran compiler flags + 
AX_CHECK_COMPILE_FLAG([-funroll-all-loops], [FCFLAGS="$FCFLAGS -funroll-all-loops"]) + AX_CHECK_COMPILE_FLAG([-flto], [FCFLAGS="$FCFLAGS -flto"]) + + # intel compiler flags + AX_CHECK_COMPILE_FLAG([-funroll-loops], [FCFLAGS="$FCFLAGS -funroll-loops"]) + AX_CHECK_COMPILE_FLAG([-ffat-lto-objects], [FCFLAGS="$FCFLAGS -ffat-lto-objects"]) fi @@ -505,7 +554,11 @@ AM_CONDITIONAL([ENABLE_OPENMP], [test "$ENABLE_OPENMP" = "yes"]) AC_MSG_CHECKING([whether we are enabling OpenMP]) if test "x$ENABLE_OPENMP" = "xyes"; then AC_MSG_RESULT([yes]) - FCFLAGS="$FCFLAGS -fopenmp -DENABLE_OPENMP" + # gfortran compiler flags + AX_CHECK_COMPILE_FLAG([-fopenmp], [FCFLAGS="$FCFLAGS -fopenmp -DENABLE_OPENMP"]) + + # intel compiler flags + AX_CHECK_COMPILE_FLAG([-qopenmp], [FCFLAGS="$FCFLAGS -qopenmp -DENABLE_OPENMP"]) else AC_MSG_RESULT([no]) fi diff --git a/examples/moist.config b/examples/moist.config index 13b3d6e28..a9a5172e0 100755 --- a/examples/moist.config +++ b/examples/moist.config @@ -9,7 +9,7 @@ ! output info ! output%field_freq = 50 ![s] write after these many seconds to the field NetCDF file - output%parcel_freq = 50 ![s] write after these many seconds to the parcel NetCDF file + output%parcel_freq = 200 ![s] write after these many seconds to the parcel NetCDF file output%parcel_stats_freq = 50 ![s] write after these many seconds to parcel stats NetCDF file output%field_stats_freq = 50 ![s] write after these many seconds to the field stats NetCDF file output%write_fields = .true. ! enable / disable field dump @@ -35,4 +35,10 @@ time%limit = 1000.0 ! time limit (s) time%alpha = 0.2 ! scaling factor for the strain and buoyancy gradient time step time%precise_stop = .false. ! time limit exact + + ! + ! damping info + ! + damping%l_vorticity = .false. + damping%l_scalars = .false. 
/ diff --git a/m4/ax_check_compile_flag.m4 b/m4/ax_check_compile_flag.m4 new file mode 100644 index 000000000..bd753b34d --- /dev/null +++ b/m4/ax_check_compile_flag.m4 @@ -0,0 +1,53 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS], [INPUT]) +# +# DESCRIPTION +# +# Check whether the given FLAG works with the current language's compiler +# or gives an error. (Warnings, however, are ignored) +# +# ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on +# success/failure. +# +# If EXTRA-FLAGS is defined, it is added to the current language's default +# flags (e.g. CFLAGS) when the check is done. The check is thus made with +# the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to +# force the compiler to issue an error when a bad flag is given. +# +# INPUT gives an alternative input source to AC_COMPILE_IFELSE. +# +# NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this +# macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. +# +# LICENSE +# +# Copyright (c) 2008 Guido U. Draheim +# Copyright (c) 2011 Maarten Bosmans +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 6 + +AC_DEFUN([AX_CHECK_COMPILE_FLAG], +[AC_PREREQ(2.64)dnl for _AC_LANG_PREFIX and AS_VAR_IF +AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl +AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ + ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS + _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" + AC_COMPILE_IFELSE([m4_default([$5],[AC_LANG_PROGRAM()])], + [AS_VAR_SET(CACHEVAR,[yes])], + [AS_VAR_SET(CACHEVAR,[no])]) + _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) +AS_VAR_IF(CACHEVAR,yes, + [m4_default([$2], :)], + [m4_default([$3], :)]) +AS_VAR_POPDEF([CACHEVAR])dnl +])dnl AX_CHECK_COMPILE_FLAGS diff --git a/m4/ax_compiler_vendor.m4 b/m4/ax_compiler_vendor.m4 new file mode 100644 index 000000000..039f99d2b --- /dev/null +++ b/m4/ax_compiler_vendor.m4 @@ -0,0 +1,119 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_compiler_vendor.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_COMPILER_VENDOR +# +# DESCRIPTION +# +# Determine the vendor of the C, C++ or Fortran compiler. The vendor is +# returned in the cache variable $ax_cv_c_compiler_vendor for C, +# $ax_cv_cxx_compiler_vendor for C++ or $ax_cv_fc_compiler_vendor for +# (modern) Fortran. The value is one of "intel", "ibm", "pathscale", +# "clang" (LLVM), "cray", "fujitsu", "sdcc", "sx", "nvhpc" (NVIDIA HPC +# Compiler), "portland" (PGI), "gnu" (GCC), "sun" (Oracle Developer +# Studio), "hp", "dec", "borland", "comeau", "kai", "lcc", "sgi", +# "microsoft", "metrowerks", "watcom", "tcc" (Tiny CC) or "unknown" (if +# the compiler cannot be determined). +# +# To check for a Fortran compiler, you must first call AC_FC_PP_SRCEXT +# with an appropriate preprocessor-enabled extension. 
For example: +# +# AC_LANG_PUSH([Fortran]) +# AC_PROG_FC +# AC_FC_PP_SRCEXT([F]) +# AX_COMPILER_VENDOR +# AC_LANG_POP([Fortran]) +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2008 Matteo Frigo +# Copyright (c) 2018-19 John Zaitseff +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see <https://www.gnu.org/licenses/>. +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 32 + +AC_DEFUN([AX_COMPILER_VENDOR], [dnl + AC_CACHE_CHECK([for _AC_LANG compiler vendor], ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor, [dnl + dnl If you modify this list of vendors, please add similar support + dnl to ax_compiler_version.m4 if at all possible. 
+ dnl + dnl Note: Do NOT check for GCC first since some other compilers + dnl define __GNUC__ to remain compatible with it. Compilers that + dnl are very slow to start (such as Intel) are listed first. + + vendors=" + intel: __ICC,__ECC,__INTEL_COMPILER + ibm: __xlc__,__xlC__,__IBMC__,__IBMCPP__,__ibmxl__ + pathscale: __PATHCC__,__PATHSCALE__ + clang: __clang__ + cray: _CRAYC + fujitsu: __FUJITSU + sdcc: SDCC,__SDCC + sx: _SX + nvhpc: __NVCOMPILER + portland: __PGI + gnu: __GNUC__ + sun: __SUNPRO_C,__SUNPRO_CC,__SUNPRO_F90,__SUNPRO_F95 + hp: __HP_cc,__HP_aCC + dec: __DECC,__DECCXX,__DECC_VER,__DECCXX_VER + borland: __BORLANDC__,__CODEGEARC__,__TURBOC__ + comeau: __COMO__ + kai: __KCC + lcc: __LCC__ + sgi: __sgi,sgi + microsoft: _MSC_VER + metrowerks: __MWERKS__ + watcom: __WATCOMC__ + tcc: __TINYC__ + unknown: UNKNOWN + " + for ventest in $vendors; do + case $ventest in + *:) + vendor=$ventest + continue + ;; + *) + vencpp="defined("`echo $ventest | sed 's/,/) || defined(/g'`")" + ;; + esac + + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [[ +#if !($vencpp) + thisisanerror; +#endif + ]])], [break]) + done + + ax_cv_[]_AC_LANG_ABBREV[]_compiler_vendor=`echo $vendor | cut -d: -f1` + ]) +])dnl diff --git a/src/3d/Makefile.am b/src/3d/Makefile.am index 4f2974e7c..62b18565e 100644 --- a/src/3d/Makefile.am +++ b/src/3d/Makefile.am @@ -34,6 +34,7 @@ epic3d_SOURCES = \ boundary_layer/bndry_fluxes.f90 \ utils/utils.f90 \ stepper/rk_utils.f90 \ + parcels/parcel_damping.f90 \ stepper/ls_rk.f90 \ epic3d.f90 diff --git a/src/3d/epic3d.f90 b/src/3d/epic3d.f90 index b2da72896..3cd68c58c 100644 --- a/src/3d/epic3d.f90 +++ b/src/3d/epic3d.f90 @@ -22,6 +22,7 @@ program epic3d use parcel_diagnostics, only : parcel_stats_timer use parcel_netcdf, only : parcel_io_timer use parcel_diagnostics_netcdf, only : parcel_stats_io_timer + use parcel_damping, only : damping_timer use fields use field_netcdf, only : field_io_timer use field_diagnostics, only : field_stats_timer @@ -89,6 +90,7 @@ 
subroutine pre_run call register_timer('merge tree resolve', merge_tree_resolve_timer) call register_timer('p2g/v2g halo (non-excl.)', halo_swap_timer) call register_timer('boundary fluxes', bndry_flux_timer) + call register_timer('damping', damping_timer) call start_timer(epic_timer) diff --git a/src/3d/fields/field_diagnostics.f90 b/src/3d/fields/field_diagnostics.f90 index 3b9d88a28..17b373f28 100644 --- a/src/3d/fields/field_diagnostics.f90 +++ b/src/3d/fields/field_diagnostics.f90 @@ -11,6 +11,9 @@ module field_diagnostics use mpi_collectives, only : mpi_blocking_reduce use physics, only : ape_calculation use ape_density, only : ape_den +#ifdef ENABLE_BUOYANCY_PERTURBATION_MODE + use physics, only : bfsq +#endif implicit none integer :: field_stats_timer @@ -62,10 +65,25 @@ subroutine calculate_field_diagnostics field_stats(IDX_AVG_NSPAR) = sum(nsparg(lo(3):hi(3)-1, lo(2):hi(2), lo(1):hi(1))) * ncelli +#ifdef ENABLE_BUOYANCY_PERTURBATION_MODE + ! add basic state + do iz = 0, nz + z(iz) = lower(3) + dble(iz) * dx(3) + tbuoyg(iz, :, :) = tbuoyg(iz, :, :) + bfsq * z(iz) + enddo +#endif + ! do not take halo cells into account field_stats(IDX_MIN_BUOY) = minval(tbuoyg(lo(3):hi(3), lo(2):hi(2), lo(1):hi(1))) field_stats(IDX_MAX_BUOY) = maxval(tbuoyg(lo(3):hi(3), lo(2):hi(2), lo(1):hi(1))) +#ifdef ENABLE_BUOYANCY_PERTURBATION_MODE + ! remove basic state + do iz = 0, nz + tbuoyg(iz, :, :) = tbuoyg(iz, :, :) - bfsq * z(iz) + enddo +#endif + ! 
use half weights for boundary grid points field_stats(IDX_KEG) = f12 * sum( volg(1:nz-1, lo(2):hi(2), lo(1):hi(1)) & * (velog(1:nz-1, lo(2):hi(2), lo(1):hi(1), 1) ** 2 & diff --git a/src/3d/fields/field_diagnostics_netcdf.f90 b/src/3d/fields/field_diagnostics_netcdf.f90 index 8b17a8d44..3088f8fe9 100644 --- a/src/3d/fields/field_diagnostics_netcdf.f90 +++ b/src/3d/fields/field_diagnostics_netcdf.f90 @@ -20,20 +20,28 @@ module field_diagnostics_netcdf character(len=512) :: ncfname integer :: ncid - integer :: t_axis_id, t_dim_id, n_writes, & - rms_v_id, abserr_v_id, max_npar_id, min_npar_id, & - avg_npar_id, avg_nspar_id, keg_id, eng_id, apeg_id, & - max_buoy_id, min_buoy_id - + integer :: t_axis_id, t_dim_id, n_writes double precision :: restart_time - - integer :: field_stats_io_timer + integer :: field_stats_io_timer + + integer, parameter :: NC_RMS_VOL = 1 & + , NC_ABS_ERR_VOL = 2 & + , NC_MAX_NPAR = 3 & + , NC_MIN_NPAR = 4 & + , NC_AVG_NPAR = 5 & + , NC_AVG_NSPAR = 6 & + , NC_KE = 7 & + , NC_EN = 8 & + , NC_APE = 9 & + , NC_MIN_BUOY = 10 & + , NC_MAX_BUOY = 11 + + type(netcdf_info) :: nc_dset(NC_MAX_BUOY) public :: create_netcdf_field_stats_file, & write_netcdf_field_stats, & field_stats_io_timer - contains ! Create the NetCDF field diagnostic file. @@ -44,11 +52,18 @@ subroutine create_netcdf_field_stats_file(basename, overwrite, l_restart) logical, intent(in) :: overwrite logical, intent(in) :: l_restart logical :: l_exist + integer :: n if (world%rank .ne. world%root) then return endif + call set_netcdf_field_diagnostics_info + + nc_dset(:)%l_enabled = .true. 
+ + nc_dset(NC_APE)%l_enabled = (ape_calculation == 'ape density') + ncfname = basename // '_field_stats.nc' restart_time = -one @@ -86,118 +101,20 @@ subroutine create_netcdf_field_stats_file(basename, overwrite, l_restart) call define_netcdf_temporal_dimension(ncid, t_dim_id, t_axis_id) - call define_netcdf_dataset( & - ncid=ncid, & - name='rms_v', & - long_name='relative rms volume error', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=rms_v_id) - - - call define_netcdf_dataset( & - ncid=ncid, & - name='abserr_v', & - long_name='max absolute normalised volume error', & - std_name='', & - unit='m^3', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=abserr_v_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='max_npar', & - long_name='max num parcels per cell', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=max_npar_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='min_npar', & - long_name='min num parcels per cell', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=min_npar_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='avg_npar', & - long_name='average num parcels per cell', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=avg_npar_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='avg_nspar', & - long_name='average num small parcels per cell', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=avg_nspar_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='ke', & - long_name='domain-averaged kinetic energy', & - std_name='', & - unit='m^2/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=keg_id) - - if (ape_calculation == 'ape density') then - call define_netcdf_dataset( & - ncid=ncid, & - name='ape', & - long_name='domain-averaged available potential energy', & - std_name='', & - 
unit='m^2/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=apeg_id) - endif + ! define field diagnostics + do n = 1, size(nc_dset) + if (nc_dset(n)%l_enabled) then + call define_netcdf_dataset(ncid=ncid, & + name=nc_dset(n)%name, & + long_name=nc_dset(n)%long_name, & + std_name=nc_dset(n)%std_name, & + unit=nc_dset(n)%unit, & + dtype=nc_dset(n)%dtype, & + dimids=(/t_dim_id/), & + varid=nc_dset(n)%varid) - call define_netcdf_dataset( & - ncid=ncid, & - name='en', & - long_name='domain-averaged enstrophy', & - std_name='', & - unit='1/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=eng_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='min_buoyancy', & - long_name='minimum gridded buoyancy', & - std_name='', & - unit='m/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=min_buoy_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='max_buoyancy', & - long_name='maximum gridded buoyancy', & - std_name='', & - unit='m/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=max_buoy_id) + endif + enddo call close_definition(ncid) @@ -205,36 +122,19 @@ subroutine create_netcdf_field_stats_file(basename, overwrite, l_restart) end subroutine create_netcdf_field_stats_file - ! Pre-condition: Assumes an open file + ! Pre-condition: Assumes an open file and nc_dset being initialised. 
subroutine read_netcdf_field_stats_content + integer :: n call get_dim_id(ncid, 't', t_dim_id) call get_var_id(ncid, 't', t_axis_id) - call get_var_id(ncid, 'rms_v', rms_v_id) - - call get_var_id(ncid, 'abserr_v', abserr_v_id) - - call get_var_id(ncid, 'max_npar', max_npar_id) - - call get_var_id(ncid, 'min_npar', min_npar_id) - - call get_var_id(ncid, 'avg_npar', avg_npar_id) - - call get_var_id(ncid, 'avg_nspar', avg_nspar_id) - - call get_var_id(ncid, 'ke', keg_id) - - if (ape_calculation == 'ape density') then - call get_var_id(ncid, 'ape', apeg_id) - endif - - call get_var_id(ncid, 'en', eng_id) - - call get_var_id(ncid, 'min_buoyancy', min_buoy_id) - - call get_var_id(ncid, 'max_buoyancy', max_buoy_id) + do n = 1, size(nc_dset) + if (nc_dset(n)%l_enabled) then + call get_var_id(ncid, nc_dset(n)%name, nc_dset(n)%varid) + endif + enddo end subroutine read_netcdf_field_stats_content @@ -268,19 +168,17 @@ subroutine write_netcdf_field_stats(t) ! ! write diagnostics ! - call write_netcdf_scalar(ncid, rms_v_id, field_stats(IDX_RMS_V), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, abserr_v_id, field_stats(IDX_ABSERR_V), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, max_npar_id, field_stats(IDX_MAX_NPAR), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, min_npar_id, field_stats(IDX_MIN_NPAR), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, avg_npar_id, field_stats(IDX_AVG_NPAR), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, avg_nspar_id, field_stats(IDX_AVG_NSPAR), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, keg_id, field_stats(IDX_KEG), n_writes, l_serial=.true.) - if (ape_calculation == 'ape density') then - call write_netcdf_scalar(ncid, apeg_id, field_stats(IDX_APEG), n_writes, l_serial=.true.) - endif - call write_netcdf_scalar(ncid, eng_id, field_stats(IDX_ENG), n_writes, l_serial=.true.) 
- call write_netcdf_scalar(ncid, min_buoy_id, field_stats(IDX_MIN_BUOY), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, max_buoy_id, field_stats(IDX_MAX_BUOY), n_writes, l_serial=.true.) + call write_diagnostic(NC_RMS_VOL, field_stats(IDX_RMS_V)) + call write_diagnostic(NC_ABS_ERR_VOL, field_stats(IDX_ABSERR_V)) + call write_diagnostic(NC_MAX_NPAR, field_stats(IDX_MAX_NPAR)) + call write_diagnostic(NC_MIN_NPAR, field_stats(IDX_MIN_NPAR)) + call write_diagnostic(NC_AVG_NPAR, field_stats(IDX_AVG_NPAR)) + call write_diagnostic(NC_AVG_NSPAR, field_stats(IDX_AVG_NSPAR)) + call write_diagnostic(NC_KE, field_stats(IDX_KEG)) + call write_diagnostic(NC_APE, field_stats(IDX_APEG)) + call write_diagnostic(NC_EN, field_stats(IDX_ENG)) + call write_diagnostic(NC_MIN_BUOY, field_stats(IDX_MIN_BUOY)) + call write_diagnostic(NC_MAX_BUOY, field_stats(IDX_MAX_BUOY)) ! increment counter n_writes = n_writes + 1 @@ -291,4 +189,106 @@ subroutine write_netcdf_field_stats(t) end subroutine write_netcdf_field_stats + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + subroutine write_diagnostic(id, val) + integer, intent(in) :: id + double precision, intent(in) :: val + + if (nc_dset(id)%l_enabled) then + if (nc_dset(id)%dtype == NF90_INT) then + call write_netcdf_scalar(ncid, nc_dset(id)%varid, int(val), & + n_writes, l_serial=.true.) + else + call write_netcdf_scalar(ncid, nc_dset(id)%varid, val, & + n_writes, l_serial=.true.) 
+ endif + endif + + end subroutine write_diagnostic + + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + subroutine set_netcdf_field_diagnostics_info + + nc_dset(NC_RMS_VOL) = netcdf_info( & + name='rms_v', & + long_name='relative rms volume error', & + std_name='', & + unit='1', & + dtype=NF90_DOUBLE) + + + nc_dset(NC_ABS_ERR_VOL) = netcdf_info( & + name='abserr_v', & + long_name='max absolute normalised volume error', & + std_name='', & + unit='m^3', & + dtype=NF90_DOUBLE) + + nc_dset(NC_MAX_NPAR) = netcdf_info( & + name='max_npar', & + long_name='max num parcels per cell', & + std_name='', & + unit='1', & + dtype=NF90_INT) + + nc_dset(NC_MIN_NPAR) = netcdf_info( & + name='min_npar', & + long_name='min num parcels per cell', & + std_name='', & + unit='1', & + dtype=NF90_INT) + + nc_dset(NC_AVG_NPAR) = netcdf_info( & + name='avg_npar', & + long_name='average num parcels per cell', & + std_name='', & + unit='1', & + dtype=NF90_DOUBLE) + + nc_dset(NC_AVG_NSPAR) = netcdf_info( & + name='avg_nspar', & + long_name='average num small parcels per cell', & + std_name='', & + unit='1', & + dtype=NF90_DOUBLE) + + nc_dset(NC_KE) = netcdf_info( & + name='ke', & + long_name='domain-averaged kinetic energy', & + std_name='', & + unit='m^2/s^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_APE) = netcdf_info( & + name='ape', & + long_name='domain-averaged available potential energy', & + std_name='', & + unit='m^2/s^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_EN) = netcdf_info( & + name='en', & + long_name='domain-averaged enstrophy', & + std_name='', & + unit='1/s^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_MIN_BUOY) = netcdf_info( & + name='min_buoyancy', & + long_name='minimum gridded buoyancy', & + std_name='', & + unit='m/s^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_MAX_BUOY) = netcdf_info( & + name='max_buoyancy', & + long_name='maximum gridded buoyancy', & + std_name='', & + unit='m/s^2', & + dtype=NF90_DOUBLE) + + end subroutine 
set_netcdf_field_diagnostics_info + end module field_diagnostics_netcdf diff --git a/src/3d/fields/field_netcdf.f90 b/src/3d/fields/field_netcdf.f90 index 09aad11ab..839d0bdd5 100644 --- a/src/3d/fields/field_netcdf.f90 +++ b/src/3d/fields/field_netcdf.f90 @@ -27,17 +27,6 @@ module field_netcdf integer :: coord_ids(3) ! = (x, y, z) integer :: t_axis_id - - type netcdf_field_info - character(32) :: name = '' - character(128) :: long_name = '' - character(128) :: std_name = '' - character(16) :: unit = '' - integer :: dtype = -1 - integer :: varid = -1 - logical :: l_enabled = .false. - end type netcdf_field_info - integer, parameter :: NC_X_VEL = 1 & , NC_Y_VEL = 2 & , NC_Z_VEL = 3 & @@ -57,9 +46,9 @@ module field_netcdf , NC_HUM = 15 & , NC_LBUOY = 16 - type(netcdf_field_info) :: nc_dset(16) + type(netcdf_info) :: nc_dset(16) #else - type(netcdf_field_info) :: nc_dset(13) + type(netcdf_info) :: nc_dset(13) #endif integer :: n_writes @@ -473,103 +462,103 @@ end subroutine set_netcdf_field_output !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: subroutine set_netcdf_field_info - nc_dset(NC_X_VEL) = netcdf_field_info(name='x_velocity', & - long_name='x velocity component', & - std_name='', & - unit='m/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_Y_VEL) = netcdf_field_info(name='y_velocity', & - long_name='y velocity component', & - std_name='', & - unit='m/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_Z_VEL) = netcdf_field_info(name='z_velocity', & - long_name='z velocity component', & - std_name='', & - unit='m/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_X_VTEND) = netcdf_field_info(name='x_vorticity_tendency', & - long_name='x vorticity tendency', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_Y_VTEND) = netcdf_field_info(name='y_vorticity_tendency', & - long_name='y vorticity tendency', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_Z_VTEND) = netcdf_field_info(name='z_vorticity_tendency', & - 
long_name='z vorticity tendency', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_NPARG) = netcdf_field_info(name='nparg', & - long_name='number of parcels per cell', & - std_name='', & - unit='1', & - dtype=NF90_INT) - - nc_dset(NC_NSPARG) = netcdf_field_info(name='nsparg', & - long_name='number of small parcels per cell', & - std_name='', & - unit='1', & - dtype=NF90_INT) - - nc_dset(NC_X_VOR) = netcdf_field_info(name='x_vorticity', & - long_name='x vorticity component', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_Y_VOR) = netcdf_field_info(name='y_vorticity', & - long_name='y vorticity component', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_Z_VOR) = netcdf_field_info(name='z_vorticity', & - long_name='z vorticity component', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE) - - nc_dset(NC_TBUOY) = netcdf_field_info(name='buoyancy', & - long_name='total buoyancy', & - std_name='', & - unit='m/s^2', & - dtype=NF90_DOUBLE) + nc_dset(NC_X_VEL) = netcdf_info(name='x_velocity', & + long_name='x velocity component', & + std_name='', & + unit='m/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Y_VEL) = netcdf_info(name='y_velocity', & + long_name='y velocity component', & + std_name='', & + unit='m/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Z_VEL) = netcdf_info(name='z_velocity', & + long_name='z velocity component', & + std_name='', & + unit='m/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_X_VTEND) = netcdf_info(name='x_vorticity_tendency', & + long_name='x vorticity tendency', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Y_VTEND) = netcdf_info(name='y_vorticity_tendency', & + long_name='y vorticity tendency', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Z_VTEND) = netcdf_info(name='z_vorticity_tendency', & + long_name='z vorticity tendency', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_NPARG) = netcdf_info(name='nparg', & 
+ long_name='number of parcels per cell', & + std_name='', & + unit='1', & + dtype=NF90_INT) + + nc_dset(NC_NSPARG) = netcdf_info(name='nsparg', & + long_name='number of small parcels per cell', & + std_name='', & + unit='1', & + dtype=NF90_INT) + + nc_dset(NC_X_VOR) = netcdf_info(name='x_vorticity', & + long_name='x vorticity component', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Y_VOR) = netcdf_info(name='y_vorticity', & + long_name='y vorticity component', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Z_VOR) = netcdf_info(name='z_vorticity', & + long_name='z vorticity component', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_TBUOY) = netcdf_info(name='buoyancy', & + long_name='total buoyancy', & + std_name='', & + unit='m/s^2', & + dtype=NF90_DOUBLE) #ifndef ENABLE_DRY_MODE - nc_dset(NC_DBUOY) = netcdf_field_info(name='dry_buoyancy', & - long_name='dry buoyancy', & - std_name='', & - unit='m/s^2', & - dtype=NF90_DOUBLE) - - nc_dset(NC_HUM) = netcdf_field_info(name='humidity', & - long_name='specific humidity', & - std_name='', & - unit='kg/kg', & - dtype=NF90_DOUBLE) - - nc_dset(NC_LBUOY) = netcdf_field_info(name='liquid_water_content', & - long_name='liquid-water content', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE) + nc_dset(NC_DBUOY) = netcdf_info(name='dry_buoyancy', & + long_name='dry buoyancy', & + std_name='', & + unit='m/s^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_HUM) = netcdf_info(name='humidity', & + long_name='specific humidity', & + std_name='', & + unit='kg/kg', & + dtype=NF90_DOUBLE) + + nc_dset(NC_LBUOY) = netcdf_info(name='liquid_water_content', & + long_name='liquid-water content', & + std_name='', & + unit='1', & + dtype=NF90_DOUBLE) #endif - nc_dset(NC_VOL) = netcdf_field_info(name='volume', & - long_name='volume', & - std_name='', & - unit='m^3', & - dtype=NF90_DOUBLE) + nc_dset(NC_VOL) = netcdf_info(name='volume', & + long_name='volume', & + std_name='', 
& + unit='m^3', & + dtype=NF90_DOUBLE) end subroutine set_netcdf_field_info end module field_netcdf diff --git a/src/3d/fields/fields.f90 b/src/3d/fields/fields.f90 index 2550fbf90..8a82c6243 100644 --- a/src/3d/fields/fields.f90 +++ b/src/3d/fields/fields.f90 @@ -43,18 +43,22 @@ module fields #ifndef NDEBUG sym_volg, & ! symmetry volume (debug mode only) #endif - volg ! volume scalar field + volg, & ! volume scalar field + strain_mag ! strain magnitude integer, allocatable, dimension(:, :, :) :: & nparg, & ! number of parcels per grid box nsparg ! number of small parcels per grid box - ! velocity strain indices + ! velocity strain indices (note that dw/dz is found from continuity) integer, parameter :: I_DUDX = 1 & ! index for du/dx strain component , I_DUDY = 2 & ! index for du/dy strain component - , I_DVDY = 3 & ! index for dv/dy strain component - , I_DWDX = 4 & ! index for dw/dx strain component - , I_DWDY = 5 ! index for dw/dy strain component + , I_DUDZ = 3 & ! index for du/dz strain component + , I_DVDX = 4 & ! index for dv/dx strain component + , I_DVDY = 5 & ! index for dv/dy strain component + , I_DVDZ = 6 & ! index for dv/dz strain component + , I_DWDX = 7 & ! index for dw/dx strain component + , I_DWDY = 8 ! 
index for dw/dy strain component contains @@ -74,9 +78,10 @@ subroutine field_alloc hhi = box%hhi allocate(velog(hlo(3):hhi(3), hlo(2):hhi(2), hlo(1):hhi(1), n_dim)) - allocate(velgradg(hlo(3):hhi(3), hlo(2):hhi(2), hlo(1):hhi(1), 5)) + allocate(velgradg(hlo(3):hhi(3), hlo(2):hhi(2), hlo(1):hhi(1), 8)) allocate(volg(hlo(3):hhi(3), hlo(2):hhi(2), hlo(1):hhi(1))) + allocate(strain_mag(hlo(3):hhi(3), hlo(2):hhi(2), hlo(1):hhi(1))) #ifndef NDEBUG allocate(sym_volg(hlo(3):hhi(3), hlo(2):hhi(2), hlo(1):hhi(1))) @@ -107,6 +112,7 @@ subroutine field_default velog = zero velgradg = zero volg = zero + strain_mag = zero vortg = zero vtend = zero tbuoyg = zero @@ -130,12 +136,13 @@ subroutine field_dealloc deallocate(velog) deallocate(velgradg) deallocate(volg) + deallocate(strain_mag) deallocate(vortg) deallocate(vtend) deallocate(tbuoyg) #ifndef ENABLE_DRY_MODE deallocate(dbuoyg) - deallocate(humg ) + deallocate(humg) #endif deallocate(nparg) deallocate(nsparg) diff --git a/src/3d/inversion/inversion.f90 b/src/3d/inversion/inversion.f90 index 5963bf366..c407e7f99 100644 --- a/src/3d/inversion/inversion.f90 +++ b/src/3d/inversion/inversion.f90 @@ -305,6 +305,13 @@ subroutine vel2vgrad(svel) velgradg(nz+1, :, :, I_DWDY) = -velgradg(nz-1, :, :, I_DWDY) !$omp end parallel workshare + !$omp parallel workshare + ! fill the other components + velgradg(:, :, :, I_DUDZ) = velgradg(:, :, :, I_DWDX) + vortg(:, :, :, I_Y) + velgradg(:, :, :, I_DVDX) = velgradg(:, :, :, I_DUDY) + vortg(:, :, :, I_Z) + velgradg(:, :, :, I_DVDZ) = velgradg(:, :, :, I_DWDY) - vortg(:, :, :, I_X) + !$omp end parallel workshare + call field_halo_fill_vector(velgradg, l_alloc=.true.) end subroutine vel2vgrad diff --git a/src/3d/parcels/parcel_container.f90 b/src/3d/parcels/parcel_container.f90 index faa92f3e9..c89b25614 100644 --- a/src/3d/parcels/parcel_container.f90 +++ b/src/3d/parcels/parcel_container.f90 @@ -54,7 +54,10 @@ module parcel_container IDX_RK4_DB23, & ! RK4 variable for B23 IDX_RK4_DUDX, & ! 
RK4 variable du/dx IDX_RK4_DUDY, & ! RK4 variable du/dy + IDX_RK4_DUDZ, & ! RK4 variable du/dz + IDX_RK4_DVDX, & ! RK4 variable dv/dx IDX_RK4_DVDY, & ! RK4 variable dv/dy + IDX_RK4_DVDZ, & ! RK4 variable dv/dz IDX_RK4_DWDX, & ! RK4 variable dw/dx IDX_RK4_DWDY ! RK4 variable dw/dy @@ -130,11 +133,14 @@ subroutine set_buffer_indices IDX_RK4_DB23 = i + 10 IDX_RK4_DUDX = i + 11 IDX_RK4_DUDY = i + 12 - IDX_RK4_DVDY = i + 13 - IDX_RK4_DWDX = i + 14 - IDX_RK4_DWDY = i + 15 + IDX_RK4_DUDZ = i + 13 + IDX_RK4_DVDX = i + 14 + IDX_RK4_DVDY = i + 15 + IDX_RK4_DVDZ = i + 16 + IDX_RK4_DWDX = i + 17 + IDX_RK4_DWDY = i + 18 - i = i + 16 + i = i + 19 n_par_attrib = set_ellipsoid_buffer_indices(i) @@ -315,7 +321,7 @@ subroutine parcel_alloc(num) ! LS-RK4 variables allocate(parcels%delta_pos(3, num)) allocate(parcels%delta_vor(3, num)) - allocate(parcels%strain(5, num)) + allocate(parcels%strain(8, num)) allocate(parcels%delta_b(5, num)) end subroutine parcel_alloc diff --git a/src/3d/parcels/parcel_damping.f90 b/src/3d/parcels/parcel_damping.f90 new file mode 100644 index 000000000..9e79d6e0b --- /dev/null +++ b/src/3d/parcels/parcel_damping.f90 @@ -0,0 +1,151 @@ +! ============================================================================= +! This module contains the subroutines to do damping of parcel properties to +! gridded fields in a conservative manner. It does this by nudging all the +! parcels associated with a grid point to the grid point value, with the strength +! of damping proportional to the grid point contribution to the gridded value and +! the strain rate at the grid point. +! 
============================================================================= +module parcel_damping + use constants, only : f14, zero, one + use mpi_timer, only : start_timer, stop_timer + use parameters, only : nx, nz, vmin + use parcel_container, only : parcels, n_parcels + use parcel_ellipsoid + use parcel_interpl + use fields + use omp_lib + use options, only : damping + use inversion_mod, only : vor2vel + use rk_utils, only : get_strain_magnitude_field + implicit none + + + private + + ! interpolation indices + ! (first dimension x, y, z; second dimension l-th index) + integer :: is, js, ks + integer :: damping_timer + + ! interpolation weights + double precision :: weights(0:1,0:1,0:1) + double precision :: weight(0:1,0:1,0:1) + double precision :: time_fact(0:1,0:1,0:1) + + public :: parcel_damp, damping_timer + + contains + + subroutine parcel_damp(dt) + double precision, intent(in) :: dt + + if (damping%l_vorticity .or. damping%l_scalars) then + ! ensure gridded fields are up to date + call par2grid(.false.) + call vor2vel + + ! Reflect beyond boundaries to ensure damping is conservative + ! This is because the points below the surface contribute to the level above + !$omp parallel workshare + vortg(-1, :, :, :) = vortg(1, :, :, :) + vortg(nz+1, :, :, :) = vortg(nz-1, :, :, :) + +#ifndef ENABLE_DRY_MODE + humg(-1, :, :) = humg(1, :, :) + humg(nz+1, :, :) = humg(nz-1, :, :) + dbuoyg(-1, :, :) = dbuoyg(1, :, :) + dbuoyg(nz+1, :, :) = dbuoyg(nz-1, :, :) +#endif + tbuoyg(-1, :, :) = tbuoyg(1, :, :) + tbuoyg(nz+1, :, :) = tbuoyg(nz-1, :, :) + !$omp end parallel workshare + + call get_strain_magnitude_field + call perturbation_damping(dt, .true.) + end if + + end subroutine parcel_damp + + ! + ! @pre + subroutine perturbation_damping(dt, l_reuse) + double precision, intent(in) :: dt + logical, intent(in) :: l_reuse + integer :: n, p, l + double precision :: points(3, n_points_p2g) + double precision :: pvol + ! 
tendencies need to be summed up between associated 4 points + ! before modifying the parcel attribute + double precision :: vortend(3) + double precision :: buoytend +#ifndef ENABLE_DRY_MODE + double precision :: humtend +#endif + + call start_timer(damping_timer) + + !$omp parallel default(shared) + !$omp do private(n, p, l, points, pvol, weight) & +#ifndef ENABLE_DRY_MODE + !$omp& private(is, js, ks, weights, vortend, buoytend, humtend, time_fact) +#else + !$omp& private(is, js, ks, weights, vortend, buoytend, time_fact) +#endif + do n = 1, n_parcels + pvol = parcels%volume(n) +#ifndef ENABLE_P2G_1POINT + points = get_ellipsoid_points(parcels%position(:, n), & + pvol, parcels%B(:, n), n, l_reuse) +#else + points(:, 1) = parcels%position(:, n) +#endif + vortend = zero + buoytend = zero +#ifndef ENABLE_DRY_MODE + humtend = zero +#endif + + ! we have 4 points per ellipsoid + do p = 1, n_points_p2g + call trilinear(points(:, p), is, js, ks, weights) + weight = point_weight_p2g * weights + if (damping%l_vorticity) then + ! Note this exponential factor can be different for vorticity/scalars + time_fact = one - exp(-damping%vorticity_prefactor * strain_mag(ks:ks+1, js:js+1, is:is+1) * dt) + do l = 1,3 + vortend(l) = vortend(l)+sum(weight * time_fact * (vortg(ks:ks+1, js:js+1, is:is+1, l) & + - parcels%vorticity(l,n))) + enddo + endif + if (damping%l_scalars) then + time_fact = one - exp(-damping%scalars_prefactor * strain_mag(ks:ks+1, js:js+1, is:is+1) * dt) +#ifndef ENABLE_DRY_MODE + humtend = humtend + sum(weight * time_fact * (humg(ks:ks+1, js:js+1, is:is+1) - parcels%humidity(n))) + buoytend = buoytend + sum(weight * time_fact * (dbuoyg(ks:ks+1, js:js+1, is:is+1) - parcels%buoyancy(n))) +#else + buoytend = buoytend + sum(weight * time_fact * (tbuoyg(ks:ks+1, js:js+1, is:is+1) - parcels%buoyancy(n))) +#endif + endif + enddo + ! 
Add all the tendencies only at the end + if (damping%l_vorticity) then + do l=1,3 + parcels%vorticity(l,n) = parcels%vorticity(l,n) + vortend(l) + enddo + endif + if (damping%l_scalars) then +#ifndef ENABLE_DRY_MODE + parcels%humidity(n) = parcels%humidity(n) + humtend +#endif + parcels%buoyancy(n) = parcels%buoyancy(n) + buoytend + endif + enddo + !$omp end do + !$omp end parallel + + call stop_timer(damping_timer) + + end subroutine perturbation_damping + + +end module parcel_damping diff --git a/src/3d/parcels/parcel_diagnostics.f90 b/src/3d/parcels/parcel_diagnostics.f90 index ee5aabb67..93816e0eb 100644 --- a/src/3d/parcels/parcel_diagnostics.f90 +++ b/src/3d/parcels/parcel_diagnostics.f90 @@ -7,7 +7,7 @@ module parcel_diagnostics use parcel_container, only : parcels, n_parcels, n_total_parcels use parcel_ellipsoid use parcel_split_mod, only : n_parcel_splits - use parcel_merging, only : n_parcel_merges + use parcel_merging, only : n_parcel_merges, n_big_close, n_way_parcel_mergers use omp_lib use physics, only : ape_calculation use ape_density, only : ape_den @@ -37,11 +37,13 @@ module parcel_diagnostics IDX_NTOT_PAR = 12, & ! total number of parcels IDX_ENSTROPHY = 13, & ! enstrophy IDX_NSPLITS = 14, & ! number of parcel splits since last write - IDX_NMERGES = 15, & ! number of parcel merges since last write - IDX_MIN_BUOY = 16, & ! minimum parcel buoyancy - IDX_MAX_BUOY = 17 ! maximum parcel buoyancy + IDX_NBIG_ICLO = 15, & ! number of big iclo neighbours (merging) + IDX_NMERGES = 16, & ! number of parcel merges since last write + IDX_MIN_BUOY = 17, & ! minimum parcel buoyancy + IDX_MAX_BUOY = 18 ! 
maximum parcel buoyancy double precision :: parcel_stats(IDX_MAX_BUOY) + integer :: parcel_merge_stats(size(n_way_parcel_mergers)) contains @@ -119,13 +121,17 @@ subroutine calculate_parcel_diagnostics parcel_stats(IDX_MIN_BUOY) = bmin parcel_stats(IDX_MAX_BUOY) = bmax - parcel_stats(IDX_NSPLITS) = n_parcel_splits - parcel_stats(IDX_NMERGES) = n_parcel_merges + parcel_stats(IDX_NSPLITS) = n_parcel_splits + parcel_stats(IDX_NBIG_ICLO) = n_big_close + parcel_stats(IDX_NMERGES) = n_parcel_merges call mpi_blocking_reduce(parcel_stats(IDX_APE:IDX_NMERGES), MPI_SUM, world) call mpi_blocking_reduce(parcel_stats(IDX_MIN_BUOY), MPI_MIN, world) call mpi_blocking_reduce(parcel_stats(IDX_MAX_BUOY), MPI_MAX, world) + parcel_merge_stats = n_way_parcel_mergers + call mpi_blocking_reduce(parcel_merge_stats, MPI_SUM, world) + n_total_parcels = nint(parcel_stats(IDX_NTOT_PAR)) ntoti = one / dble(n_total_parcels) diff --git a/src/3d/parcels/parcel_diagnostics_netcdf.f90 b/src/3d/parcels/parcel_diagnostics_netcdf.f90 index 07ad02a3b..5c9d419cd 100644 --- a/src/3d/parcels/parcel_diagnostics_netcdf.f90 +++ b/src/3d/parcels/parcel_diagnostics_netcdf.f90 @@ -10,7 +10,7 @@ module parcel_diagnostics_netcdf use parcel_diagnostics use parameters, only : lower, extent, nx, ny, nz, write_zeta_boundary_flag use parcel_split_mod, only : n_parcel_splits - use parcel_merging, only : n_parcel_merges + use parcel_merging, only : n_parcel_merges, n_big_close, n_way_parcel_mergers use config, only : package_version, cf_version use mpi_timer, only : start_timer, stop_timer use options, only : write_netcdf_options @@ -22,23 +22,37 @@ module parcel_diagnostics_netcdf character(len=512) :: ncfname integer :: ncid - integer :: t_axis_id, t_dim_id, n_writes, & - ape_id, ke_id, te_id, npar_id, nspar_id, & - rms_x_vor_id, rms_y_vor_id, rms_z_vor_id, & - avg_lam_id, std_lam_id, & - avg_vol_id, std_vol_id, sum_vol_id, & - en_id, n_par_split_id, n_par_merge_id, & - min_buo_id, max_buo_id - + integer :: t_axis_id, 
t_dim_id, n_writes, nmerge_dim_id double precision :: restart_time - - integer :: parcel_stats_io_timer + integer :: parcel_stats_io_timer + + integer, parameter :: NC_APE = 1 & + , NC_KE = 2 & + , NC_TE = 3 & + , NC_EN = 4 & + , NC_NPAR = 5 & + , NC_NSPAR = 6 & + , NC_RMS_X_VOR = 7 & + , NC_RMS_Y_VOR = 8 & + , NC_RMS_Z_VOR = 9 & + , NC_AVG_LAM = 10 & + , NC_STD_LAM = 11 & + , NC_AVG_VOL = 12 & + , NC_STD_VOL = 13 & + , NC_SUM_VOL = 14 & + , NC_NPAR_SPLIT = 15 & + , NC_NBIG_CLOSE = 16 & + , NC_NPAR_MERGE = 17 & + , NC_MIN_BUOY = 18 & + , NC_MAX_BUOY = 19 & + , NC_NWAY_MERGE = 20 + + type(netcdf_info) :: nc_dset(NC_NWAY_MERGE) public :: create_netcdf_parcel_stats_file, & write_netcdf_parcel_stats, & parcel_stats_io_timer - contains ! Create the parcel diagnostic file. @@ -49,12 +63,18 @@ subroutine create_netcdf_parcel_stats_file(basename, overwrite, l_restart) logical, intent(in) :: overwrite logical, intent(in) :: l_restart logical :: l_exist - integer :: start(1), cnt(1) + integer :: start(1), cnt(1), n if (world%rank /= world%root) then return endif + call set_netcdf_parcel_diagnostics_info + + nc_dset(:)%l_enabled = .true. + + nc_dset(NC_APE)%l_enabled = (ape_calculation == 'ape density') + ncfname = basename // '_parcel_stats.nc' restart_time = -one @@ -95,302 +115,291 @@ subroutine create_netcdf_parcel_stats_file(basename, overwrite, l_restart) call define_netcdf_temporal_dimension(ncid, t_dim_id, t_axis_id) - if (ape_calculation == 'ape density') then - call define_netcdf_dataset( & - ncid=ncid, & - name='ape', & - long_name='domain-averaged available potential energy', & - std_name='', & - unit='m^2/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=ape_id) + call define_netcdf_dimension(ncid=ncid, & + name='n_merge', & + dimsize=size(parcel_merge_stats), & + dimid=nmerge_dim_id) + + ! 
define parcel diagnostics + do n = 1, size(nc_dset) + if (nc_dset(n)%l_enabled) then + if (n == NC_NWAY_MERGE) then + cycle + endif + call define_netcdf_dataset(ncid=ncid, & + name=nc_dset(n)%name, & + long_name=nc_dset(n)%long_name, & + std_name=nc_dset(n)%std_name, & + unit=nc_dset(n)%unit, & + dtype=nc_dset(n)%dtype, & + dimids=(/t_dim_id/), & + varid=nc_dset(n)%varid) + + endif + enddo + + n = NC_NWAY_MERGE + call define_netcdf_dataset(ncid=ncid, & + name=nc_dset(n)%name, & + long_name=nc_dset(n)%long_name, & + std_name=nc_dset(n)%std_name, & + unit=nc_dset(n)%unit, & + dtype=nc_dset(n)%dtype, & + dimids=(/nmerge_dim_id, t_dim_id/), & + varid=nc_dset(n)%varid) + + call close_definition(ncid) + + call close_netcdf_file(ncid, l_serial=.true.) + + end subroutine create_netcdf_parcel_stats_file + + ! Pre-condition: Assumes an open file and nc_dset being initialised. + subroutine read_netcdf_parcel_stats_content + integer :: n + + call get_dim_id(ncid, 't', t_dim_id) + + call get_var_id(ncid, 't', t_axis_id) + + do n = 1, size(nc_dset) + if (nc_dset(n)%l_enabled) then + call get_var_id(ncid, nc_dset(n)%name, nc_dset(n)%varid) + endif + enddo + + end subroutine read_netcdf_parcel_stats_content + + ! Write a step in the parcel diagnostic file. + ! @param[in] t is the time + subroutine write_netcdf_parcel_stats(t) + double precision, intent(in) :: t + + call start_timer(parcel_stats_io_timer) + + ! reset counters for parcel operations + n_parcel_splits = 0 + n_parcel_merges = 0 + n_big_close = 0 + n_way_parcel_mergers = 0 + + if (world%rank /= world%root) then + call stop_timer(parcel_stats_io_timer) + return endif - call define_netcdf_dataset( & - ncid=ncid, & + if (t <= restart_time) then + call stop_timer(parcel_stats_io_timer) + return + endif + + call open_netcdf_file(ncfname, NF90_WRITE, ncid, l_serial=.true.) + + if (n_writes == 1) then + call write_zeta_boundary_flag(ncid) + endif + + ! 
write time + call write_netcdf_scalar(ncid, t_axis_id, t, n_writes, l_serial=.true.) + + ! + ! write diagnostics + ! + + call write_diagnostic(NC_APE, parcel_stats(IDX_APE)) + call write_diagnostic(NC_KE, parcel_stats(IDX_KE)) + call write_diagnostic(NC_TE, parcel_stats(IDX_KE) + parcel_stats(IDX_APE)) + call write_diagnostic(NC_EN, parcel_stats(IDX_ENSTROPHY)) + call write_diagnostic(NC_NPAR, parcel_stats(IDX_NTOT_PAR)) + call write_diagnostic(NC_NSPAR, parcel_stats(IDX_N_SMALL)) + call write_diagnostic(NC_AVG_LAM, parcel_stats(IDX_AVG_LAM)) + call write_diagnostic(NC_STD_LAM, parcel_stats(IDX_STD_LAM)) + call write_diagnostic(NC_AVG_VOL, parcel_stats(IDX_AVG_VOL)) + call write_diagnostic(NC_STD_VOL, parcel_stats(IDX_STD_VOL)) + call write_diagnostic(NC_SUM_VOL, parcel_stats(IDX_SUM_VOL)) + call write_diagnostic(NC_RMS_X_VOR, parcel_stats(IDX_RMS_XI)) + call write_diagnostic(NC_RMS_Y_VOR, parcel_stats(IDX_RMS_ETA)) + call write_diagnostic(NC_RMS_Z_VOR, parcel_stats(IDX_RMS_ZETA)) + call write_diagnostic(NC_NPAR_SPLIT, parcel_stats(IDX_NSPLITS)) + call write_diagnostic(NC_NBIG_CLOSE, parcel_stats(IDX_NBIG_ICLO)) + call write_diagnostic(NC_NPAR_MERGE, parcel_stats(IDX_NMERGES)) + call write_diagnostic(NC_MIN_BUOY, parcel_stats(IDX_MIN_BUOY)) + call write_diagnostic(NC_MAX_BUOY, parcel_stats(IDX_MAX_BUOY)) + + call write_netcdf_dataset(ncid, nc_dset(NC_NWAY_MERGE)%varid, parcel_merge_stats, & + start=(/1, n_writes/), cnt=(/size(parcel_merge_stats), 1/), & + l_serial=.true.) + + ! increment counter + n_writes = n_writes + 1 + + call close_netcdf_file(ncid, l_serial=.true.) 
+ + call stop_timer(parcel_stats_io_timer) + + end subroutine write_netcdf_parcel_stats + + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + subroutine write_diagnostic(id, val) + integer, intent(in) :: id + double precision, intent(in) :: val + + if (nc_dset(id)%l_enabled) then + if (nc_dset(id)%dtype == NF90_INT) then + call write_netcdf_scalar(ncid, nc_dset(id)%varid, int(val), & + n_writes, l_serial=.true.) + else + call write_netcdf_scalar(ncid, nc_dset(id)%varid, val, & + n_writes, l_serial=.true.) + endif + endif + + end subroutine write_diagnostic + + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + subroutine set_netcdf_parcel_diagnostics_info + nc_dset(NC_APE) = netcdf_info( & + name='ape', & + long_name='domain-averaged available potential energy', & + std_name='', & + unit='m^2/s^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_KE) = netcdf_info( & name='ke', & long_name='domain-averaged kinetic energy', & std_name='', & unit='m^2/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=ke_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_TE) = netcdf_info( & name='te', & long_name='domain-averaged total energy', & std_name='', & unit='m^2/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=te_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_EN) = netcdf_info( & name='en', & long_name='domain-averaged enstrophy', & std_name='', & unit='1/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=en_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_NPAR) = netcdf_info( & name='n_parcels', & long_name='number of parcels', & std_name='', & unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=npar_id) + dtype=NF90_INT) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_NSPAR) = netcdf_info( & name='n_small_parcel', & long_name='number of small parcels', & 
std_name='', & unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=nspar_id) + dtype=NF90_INT) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_AVG_LAM) = netcdf_info( & name='avg_lam', & long_name='average aspect ratio', & std_name='', & unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=avg_lam_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_STD_LAM) = netcdf_info( & name='std_lam', & long_name='standard deviation aspect ratio', & std_name='', & unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=std_lam_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_AVG_VOL) = netcdf_info( & name='avg_vol', & long_name='average volume', & std_name='', & unit='m^3', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=avg_vol_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_STD_VOL) = netcdf_info( & name='std_vol', & long_name='standard deviation volume', & std_name='', & unit='m^3', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=std_vol_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_SUM_VOL) = netcdf_info( & name='sum_vol', & long_name='total volume', & std_name='', & unit='m^3', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=sum_vol_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_RMS_X_VOR) = netcdf_info( & name='x_rms_vorticity', & long_name='root mean square of x vorticity component', & std_name='', & unit='1/s', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=rms_x_vor_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_RMS_Y_VOR) = netcdf_info( & name='y_rms_vorticity', & long_name='root mean square of y vorticity component', & std_name='', & unit='1/s', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=rms_y_vor_id) + dtype=NF90_DOUBLE) - call 
define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_RMS_Z_VOR) = netcdf_info( & name='z_rms_vorticity', & long_name='root mean square of z vorticity component', & std_name='', & unit='1/s', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=rms_z_vor_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='n_parcel_splits', & - long_name='number of parcel splits since last time', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=n_par_split_id) - - call define_netcdf_dataset( & - ncid=ncid, & - name='n_parcel_merges', & - long_name='number of parcel merges since last time', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=n_par_merge_id) - - call define_netcdf_dataset( & - ncid=ncid, & + dtype=NF90_DOUBLE) + + nc_dset(NC_NPAR_SPLIT) = netcdf_info( & + name='n_parcel_splits', & + long_name='number of parcel splits since last time', & + std_name='', & + unit='1', & + dtype=NF90_INT) + + nc_dset(NC_NBIG_CLOSE) = netcdf_info( & + name='n_big_neighbour', & + long_name='number of big parcel neighbours', & + std_name='', & + unit='1', & + dtype=NF90_INT) + + nc_dset(NC_NPAR_MERGE) = netcdf_info( & + name='n_parcel_merges', & + long_name='number of parcel merges since last time', & + std_name='', & + unit='1', & + dtype=NF90_INT) + + nc_dset(NC_MIN_BUOY) = netcdf_info( & name='min_buoyancy', & long_name='minimum parcel buoyancy', & std_name='', & unit='m/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=min_buo_id) + dtype=NF90_DOUBLE) - call define_netcdf_dataset( & - ncid=ncid, & + nc_dset(NC_MAX_BUOY) = netcdf_info( & name='max_buoyancy', & long_name='maximum parcel buoyancy', & std_name='', & unit='m/s^2', & - dtype=NF90_DOUBLE, & - dimids=(/t_dim_id/), & - varid=max_buo_id) - - call close_definition(ncid) - - call close_netcdf_file(ncid, l_serial=.true.) - - end subroutine create_netcdf_parcel_stats_file - - ! 
Pre-condition: Assumes an open file - subroutine read_netcdf_parcel_stats_content - - call get_dim_id(ncid, 't', t_dim_id) - - call get_var_id(ncid, 't', t_axis_id) - - if (ape_calculation == 'ape density') then - call get_var_id(ncid, 'ape', ape_id) - endif - - call get_var_id(ncid, 'ke', ke_id) - - call get_var_id(ncid, 'te', te_id) - - call get_var_id(ncid, 'en', en_id) - - call get_var_id(ncid, 'n_parcels', npar_id) - - call get_var_id(ncid, 'n_small_parcel', nspar_id) - - call get_var_id(ncid, 'avg_lam', avg_lam_id) - - call get_var_id(ncid, 'std_lam', std_lam_id) - - call get_var_id(ncid, 'avg_vol', avg_vol_id) - - call get_var_id(ncid, 'std_vol', std_vol_id) - - call get_var_id(ncid, 'sum_vol', sum_vol_id) - - call get_var_id(ncid, 'x_rms_vorticity', rms_x_vor_id) - - call get_var_id(ncid, 'y_rms_vorticity', rms_y_vor_id) - - call get_var_id(ncid, 'z_rms_vorticity', rms_z_vor_id) - - call get_var_id(ncid, 'n_parcel_splits', n_par_split_id) - - call get_var_id(ncid, 'n_parcel_merges', n_par_merge_id) - - call get_var_id(ncid, 'min_buoyancy', min_buo_id) - - call get_var_id(ncid, 'max_buoyancy', max_buo_id) - - end subroutine read_netcdf_parcel_stats_content - - ! Write a step in the parcel diagnostic file. - ! @param[in] t is the time - subroutine write_netcdf_parcel_stats(t) - double precision, intent(in) :: t + dtype=NF90_DOUBLE) - call start_timer(parcel_stats_io_timer) - - ! reset counters for parcel operations - n_parcel_splits = 0 - n_parcel_merges = 0 - - if (world%rank /= world%root) then - call stop_timer(parcel_stats_io_timer) - return - endif - - if (t <= restart_time) then - call stop_timer(parcel_stats_io_timer) - return - endif - - call open_netcdf_file(ncfname, NF90_WRITE, ncid, l_serial=.true.) - - if (n_writes == 1) then - call write_zeta_boundary_flag(ncid) - endif - - ! write time - call write_netcdf_scalar(ncid, t_axis_id, t, n_writes, l_serial=.true.) - - ! - ! write diagnostics - ! 
- if (ape_calculation == 'ape density') then - call write_netcdf_scalar(ncid, ape_id, parcel_stats(IDX_APE), n_writes, l_serial=.true.) - endif - call write_netcdf_scalar(ncid, ke_id, parcel_stats(IDX_KE), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, te_id, parcel_stats(IDX_KE) + parcel_stats(IDX_APE), & - n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, npar_id, int(parcel_stats(IDX_NTOT_PAR)), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, nspar_id, int(parcel_stats(IDX_N_SMALL)), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, avg_lam_id, parcel_stats(IDX_AVG_LAM), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, std_lam_id, parcel_stats(IDX_STD_LAM), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, avg_vol_id, parcel_stats(IDX_AVG_VOL), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, std_vol_id, parcel_stats(IDX_STD_VOL), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, sum_vol_id, parcel_stats(IDX_SUM_VOL), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, rms_x_vor_id, parcel_stats(IDX_RMS_XI), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, rms_y_vor_id, parcel_stats(IDX_RMS_ETA), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, rms_z_vor_id, parcel_stats(IDX_RMS_ZETA), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, en_id, parcel_stats(IDX_ENSTROPHY), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, n_par_split_id, parcel_stats(IDX_NSPLITS), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, n_par_merge_id, parcel_stats(IDX_NMERGES), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, min_buo_id, parcel_stats(IDX_MIN_BUOY), n_writes, l_serial=.true.) - call write_netcdf_scalar(ncid, max_buo_id, parcel_stats(IDX_MAX_BUOY), n_writes, l_serial=.true.) - - ! increment counter - n_writes = n_writes + 1 - - call close_netcdf_file(ncid, l_serial=.true.) 
+ nc_dset(NC_NWAY_MERGE) = netcdf_info( & + name='n_way_merging', & + long_name='n-way merging', & + std_name='', & + unit='1', & + dtype=NF90_INT) - call stop_timer(parcel_stats_io_timer) + end subroutine set_netcdf_parcel_diagnostics_info - end subroutine write_netcdf_parcel_stats end module parcel_diagnostics_netcdf diff --git a/src/3d/parcels/parcel_interpl.f90 b/src/3d/parcels/parcel_interpl.f90 index e15a2a0b3..15a27c282 100644 --- a/src/3d/parcels/parcel_interpl.f90 +++ b/src/3d/parcels/parcel_interpl.f90 @@ -84,7 +84,9 @@ module parcel_interpl , grid2par_timer & , halo_swap_timer & , trilinear & - , bilinear + , bilinear & + , n_points_p2g & + , point_weight_p2g contains @@ -457,7 +459,7 @@ subroutine grid2par(add) parcels%delta_pos(l, n) = parcels%delta_pos(l, n) & + point_weight_g2p * sum(weights * velog(ks:ks+1, js:js+1, is:is+1, l)) enddo - do l = 1,5 + do l = 1,8 parcels%strain(l, n) = parcels%strain(l, n) & + point_weight_g2p * sum(weights * velgradg(ks:ks+1, js:js+1, is:is+1, l)) enddo diff --git a/src/3d/parcels/parcel_merge.f90 b/src/3d/parcels/parcel_merge.f90 index 439537712..ab0e907ac 100644 --- a/src/3d/parcels/parcel_merge.f90 +++ b/src/3d/parcels/parcel_merge.f90 @@ -4,6 +4,7 @@ ! ============================================================================= module parcel_merging use parcel_nearest + use parameters, only : vmin use constants, only : pi, zero, one, two, five, f13 use parcel_container, only : parcels & , n_parcels & @@ -29,8 +30,19 @@ module parcel_merging ! number of parcel merges (is reset in every write step) integer :: n_parcel_merges = 0 - private :: geometric_merge, & - do_group_merge + ! number of merging parcels (up to 7 supported, all others are put into index 7) + ! note that array index 1 corresponds to 2-way merging + integer :: n_way_parcel_mergers(7) = 0 + + ! 
number of big iclo neighbours (number of small is n_merge - n_big_close) + integer :: n_big_close = 0 + + integer, allocatable :: loca(:) + + private :: geometric_merge, & + do_group_merge, & + collect_merge_stats, & + loca contains @@ -51,9 +63,15 @@ subroutine parcel_merge ! find parcels to merge call find_nearest(isma, iclo, inva, n_merge, n_invalid) + call start_timer(merge_timer) + n_parcel_merges = n_parcel_merges + n_merge - call start_timer(merge_timer) + if (n_merge > 0) then + allocate(loca(n_parcels)) + call collect_merge_stats(iclo, n_merge) + endif + if (n_merge > 0) then ! merge small parcels into other parcels @@ -71,6 +89,10 @@ subroutine parcel_merge deallocate(iclo) endif + if (allocated(loca)) then + deallocate(loca) + endif + ! After this operation the root MPI process knows the new ! number of parcels in the simulation n_total_parcels = n_parcels @@ -89,6 +111,7 @@ subroutine parcel_merge end subroutine parcel_merge + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: ! Actual merge. ! @param[in] isma are the indices of the small parcels @@ -101,7 +124,6 @@ subroutine do_group_merge(isma, iclo, n_merge, Bm, vm) integer, intent(in) :: iclo(:) integer, intent(in) :: n_merge integer :: m, ic, is, l, n - integer :: loca(n_parcels) double precision :: x0(n_merge), y0(n_merge) double precision :: posm(3, n_merge) double precision :: delx, vmerge, dely, delz, B33, mu @@ -275,6 +297,7 @@ subroutine do_group_merge(isma, iclo, n_merge, Bm, vm) end subroutine do_group_merge + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: ! Geometric merging -- called by subroutine merge_parcels. ! 
@param[inout] parcels is the parcel container @@ -286,7 +309,6 @@ subroutine geometric_merge(isma, iclo, n_merge) integer, intent(in) :: iclo(:) integer, intent(in) :: n_merge integer :: m, ic, l - integer :: loca(n_parcels) double precision :: factor, detB double precision :: B(6, n_merge), & V(n_merge) @@ -318,4 +340,37 @@ subroutine geometric_merge(isma, iclo, n_merge) end subroutine geometric_merge + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + subroutine collect_merge_stats(iclo, n_merge) + integer, allocatable, dimension(:) :: iclo + integer :: n_merge + integer :: ic, m, j, n_count + + loca = 0 + + !------------------------------------------------------------------ + ! Find unique 'iclo' indices and the total number of parcels + ! merging with them: + do m = 1, n_merge + ic = iclo(m) + n_big_close = n_big_close + merge(1, 0, parcels%volume(ic) > vmin) + loca(ic) = loca(ic) + 1 + enddo + + !------------------------------------------------------------------ + ! Count the number of 2-, 3-, 4- etc way merging: + do m = 1, n_merge + ic = iclo(m) + n_count = loca(ic) + ! all mergers involving more than size(n_way_parcel_mergers) parcels are added together + if (n_count > 0) then + loca(ic) = -1 + j = min(size(n_way_parcel_mergers), n_count) + n_way_parcel_mergers(j) = n_way_parcel_mergers(j) + 1 + endif + enddo + + end subroutine collect_merge_stats + end module parcel_merging diff --git a/src/3d/parcels/parcel_nearest.f90 b/src/3d/parcels/parcel_nearest.f90 index e2e249126..9986166b1 100644 --- a/src/3d/parcels/parcel_nearest.f90 +++ b/src/3d/parcels/parcel_nearest.f90 @@ -364,8 +364,12 @@ subroutine find_nearest(isma, iclo, inva, n_local_small, n_invalid) ! After this operation isma, iclo and rclo are properly set. call find_closest_parcel_globally(n_local_small, iclo, rclo, dclo) + !--------------------------------------------------------------------- + ! We only need to check up to n_local_small because after the call to + ! 
find_closest_parcel_globally all small parcels know their closest + ! neighbour on the original MPI rank. if (.not. l_no_small) then - do n = 1, n_local_small + n_remote_small + do n = 1, n_local_small if (iclo(n) == 0) then write(*,*) iclo(n), dclo(n) call mpi_exit_on_error('Merge error: no near enough neighbour found.') @@ -690,7 +694,7 @@ subroutine find_closest_parcel_globally(n_local_small, iclo, rclo, dclo) #ifndef NDEBUG if (small_recv_order(n) /= tag) then call mpi_exit_on_error(& - "parcel_nearest::find_closest_parcel_globally: Wrong messge order.") + "parcel_nearest::find_closest_parcel_globally: Wrong message order.") endif if (small_recv_count(n) /= n_neighbour_small(n)) then diff --git a/src/3d/parcels/parcel_netcdf.f90 b/src/3d/parcels/parcel_netcdf.f90 index 4f81b3216..6763e2587 100644 --- a/src/3d/parcels/parcel_netcdf.f90 +++ b/src/3d/parcels/parcel_netcdf.f90 @@ -1,4 +1,5 @@ module parcel_netcdf + use options, only : output, verbose use constants, only : one use netcdf_utils use netcdf_writer @@ -18,6 +19,8 @@ module parcel_netcdf use fields, only : is_contained implicit none + private + integer :: parcel_io_timer integer :: n_writes = 1 @@ -25,32 +28,41 @@ module parcel_netcdf character(len=512) :: ncfname integer :: ncid - integer :: npar_dim_id, vol_id, buo_id, & - x_pos_id, y_pos_id, z_pos_id, & - x_vor_id, y_vor_id, z_vor_id, & - b11_id, b12_id, b13_id, & - b22_id, b23_id, start_id, & - t_axis_id, t_dim_id, mpi_dim_id + integer :: npar_dim_id & + , t_axis_id & + , t_dim_id & + , mpi_dim_id double precision :: restart_time -#ifndef ENABLE_DRY_MODE - integer :: hum_id -#endif - - private :: ncid, ncfname, n_writes, npar_dim_id, & - x_pos_id, y_pos_id, z_pos_id, start_id, & - x_vor_id, y_vor_id, z_vor_id, & - b11_id, b12_id, b13_id, b22_id, b23_id, & - vol_id, buo_id, t_dim_id, t_axis_id, & - restart_time, mpi_dim_id, & - read_chunk + integer, parameter :: NC_START = 1 & + , NC_VOL = 2 & + , NC_X_POS = 3 & + , NC_Y_POS = 4 & + , NC_Z_POS = 5 & + 
, NC_BUOY = 6 & + , NC_X_VOR = 7 & + , NC_Y_VOR = 8 & + , NC_Z_VOR = 9 & + , NC_B11 = 10 & + , NC_B12 = 11 & + , NC_B13 = 12 & + , NC_B22 = 13 & + , NC_B23 = 14 + + logical :: l_first_write = .true. #ifndef ENABLE_DRY_MODE - private :: hum_id -#endif + integer, parameter :: NC_HUM = 15 - private :: ncbasename + type(netcdf_info) :: nc_dset(NC_HUM) +#else + type(netcdf_info) :: nc_dset(NC_B23) +#endif + public :: create_netcdf_parcel_file & + , write_netcdf_parcels & + , read_netcdf_parcels & + , parcel_io_timer contains @@ -63,6 +75,9 @@ subroutine create_netcdf_parcel_file(basename, overwrite, l_restart) logical, intent(in) :: l_restart logical :: l_exist integer :: dimids(2) + integer :: n + + call set_netcdf_parcel_output ncfname = basename // '_' // zfill(n_writes) // '_parcels.nc' @@ -120,142 +135,31 @@ subroutine create_netcdf_parcel_file(basename, overwrite, l_restart) dimids = (/npar_dim_id, t_dim_id/) - call define_netcdf_dataset(ncid=ncid, & - name='x_position', & - long_name='x position component', & - std_name='', & - unit='m', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=x_pos_id) - - call define_netcdf_dataset(ncid=ncid, & - name='y_position', & - long_name='y position component', & - std_name='', & - unit='m', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=y_pos_id) - - call define_netcdf_dataset(ncid=ncid, & - name='z_position', & - long_name='z position component', & - std_name='', & - unit='m', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=z_pos_id) - - call define_netcdf_dataset(ncid=ncid, & - name='start_index', & - long_name='MPI rank start index', & - std_name='', & - unit='1', & - dtype=NF90_INT, & - dimids=(/mpi_dim_id/), & - varid=start_id) - - call define_netcdf_dataset(ncid=ncid, & - name='B11', & - long_name='B11 element of shape matrix', & - std_name='', & - unit='m^2', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=b11_id) - - call define_netcdf_dataset(ncid=ncid, & - name='B12', & - long_name='B12 element of 
shape matrix', & - std_name='', & - unit='m^2', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=b12_id) - - call define_netcdf_dataset(ncid=ncid, & - name='B13', & - long_name='B13 element of shape matrix', & - std_name='', & - unit='m^2', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=b13_id) - - call define_netcdf_dataset(ncid=ncid, & - name='B22', & - long_name='B22 element of shape matrix', & - std_name='', & - unit='m^2', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=b22_id) - - call define_netcdf_dataset(ncid=ncid, & - name='B23', & - long_name='B23 element of shape matrix', & - std_name='', & - unit='m^2', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=b23_id) - - call define_netcdf_dataset(ncid=ncid, & - name='volume', & - long_name='parcel volume', & - std_name='', & - unit='m^3', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=vol_id) - - call define_netcdf_dataset(ncid=ncid, & - name='x_vorticity', & - long_name='x vorticity component', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=x_vor_id) - - call define_netcdf_dataset(ncid=ncid, & - name='y_vorticity', & - long_name='y vorticity component', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=y_vor_id) - - call define_netcdf_dataset(ncid=ncid, & - name='z_vorticity', & - long_name='z vorticity component', & - std_name='', & - unit='1/s', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=z_vor_id) - - call define_netcdf_dataset(ncid=ncid, & - name='buoyancy', & - long_name='parcel buoyancy', & - std_name='', & - unit='m/s^2', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=buo_id) + ! 
define parcel attributes + n = NC_START + if (nc_dset(n)%l_enabled) then + call define_netcdf_dataset(ncid=ncid, & + name=nc_dset(n)%name, & + long_name=nc_dset(n)%long_name, & + std_name=nc_dset(n)%std_name, & + unit=nc_dset(n)%unit, & + dtype=nc_dset(n)%dtype, & + dimids=(/mpi_dim_id/), & + varid=nc_dset(n)%varid) + endif -#ifndef ENABLE_DRY_MODE - call define_netcdf_dataset(ncid=ncid, & - name='humidity', & - long_name='parcel humidity', & - std_name='', & - unit='1', & - dtype=NF90_DOUBLE, & - dimids=dimids, & - varid=hum_id) -#endif + do n = 2, size(nc_dset) + if (nc_dset(n)%l_enabled) then + call define_netcdf_dataset(ncid=ncid, & + name=nc_dset(n)%name, & + long_name=nc_dset(n)%long_name, & + std_name=nc_dset(n)%std_name, & + unit=nc_dset(n)%unit, & + dtype=nc_dset(n)%dtype, & + dimids=dimids, & + varid=nc_dset(n)%varid) + endif + enddo call close_definition(ncid) @@ -313,28 +217,31 @@ subroutine write_netcdf_parcels(t) start = (/ start_index, 1 /) cnt = (/ n_parcels, 1 /) - call write_netcdf_dataset(ncid, start_id, (/start_index/), start=(/1+world%rank, 1/), cnt=(/1, 1/)) + if (nc_dset(NC_START)%l_enabled) then + call write_netcdf_dataset(ncid, nc_dset(NC_START)%varid, (/start_index/), & + start=(/1+world%rank, 1/), cnt=(/1, 1/)) + endif - call write_netcdf_dataset(ncid, x_pos_id, parcels%position(1, 1:n_parcels), start, cnt) - call write_netcdf_dataset(ncid, y_pos_id, parcels%position(2, 1:n_parcels), start, cnt) - call write_netcdf_dataset(ncid, z_pos_id, parcels%position(3, 1:n_parcels), start, cnt) + call write_parcel_attribute(NC_X_POS, parcels%position(1, :), start, cnt) + call write_parcel_attribute(NC_Y_POS, parcels%position(2, :), start, cnt) + call write_parcel_attribute(NC_Z_POS, parcels%position(3, :), start, cnt) - call write_netcdf_dataset(ncid, b11_id, parcels%B(1, 1:n_parcels), start, cnt) - call write_netcdf_dataset(ncid, b12_id, parcels%B(2, 1:n_parcels), start, cnt) - call write_netcdf_dataset(ncid, b13_id, parcels%B(3, 1:n_parcels), 
start, cnt) - call write_netcdf_dataset(ncid, b22_id, parcels%B(4, 1:n_parcels), start, cnt) - call write_netcdf_dataset(ncid, b23_id, parcels%B(5, 1:n_parcels), start, cnt) + call write_parcel_attribute(NC_B11, parcels%B(1, :), start, cnt) + call write_parcel_attribute(NC_B12, parcels%B(2, :), start, cnt) + call write_parcel_attribute(NC_B13, parcels%B(3, :), start, cnt) + call write_parcel_attribute(NC_B22, parcels%B(4, :), start, cnt) + call write_parcel_attribute(NC_B23, parcels%B(5, :), start, cnt) - call write_netcdf_dataset(ncid, vol_id, parcels%volume(1:n_parcels), start, cnt) + call write_parcel_attribute(NC_VOL, parcels%volume, start, cnt) - call write_netcdf_dataset(ncid, x_vor_id, parcels%vorticity(1, 1:n_parcels), start, cnt) - call write_netcdf_dataset(ncid, y_vor_id, parcels%vorticity(2, 1:n_parcels), start, cnt) - call write_netcdf_dataset(ncid, z_vor_id, parcels%vorticity(3, 1:n_parcels), start, cnt) + call write_parcel_attribute(NC_X_VOR, parcels%vorticity(1, :), start, cnt) + call write_parcel_attribute(NC_Y_VOR, parcels%vorticity(2, :), start, cnt) + call write_parcel_attribute(NC_Z_VOR, parcels%vorticity(3, :), start, cnt) - call write_netcdf_dataset(ncid, buo_id, parcels%buoyancy(1:n_parcels), start, cnt) + call write_parcel_attribute(NC_BUOY, parcels%buoyancy, start, cnt) #ifndef ENABLE_DRY_MODE - call write_netcdf_dataset(ncid, hum_id, parcels%humidity(1:n_parcels), start, cnt) + call write_parcel_attribute(NC_HUM, parcels%humidity, start, cnt) #endif ! 
increment counter n_writes = n_writes + 1 @@ -345,6 +252,22 @@ subroutine write_netcdf_parcels(t) end subroutine write_netcdf_parcels + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + subroutine write_parcel_attribute(id, pdata, start, cnt) + integer, intent(in) :: id + double precision, intent(in) :: pdata(:) + integer, intent(in) :: cnt(2), start(2) + + if (nc_dset(id)%l_enabled) then + call write_netcdf_dataset(ncid, nc_dset(id)%varid, & + pdata(1:n_parcels), & + start, cnt) + endif + end subroutine write_parcel_attribute + + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + subroutine read_netcdf_parcels(fname) character(*), intent(in) :: fname integer :: start_index, num_indices, end_index @@ -355,6 +278,8 @@ subroutine read_netcdf_parcels(fname) call start_timer(parcel_io_timer) + call set_netcdf_parcel_info + call open_netcdf_file(fname, NF90_NOWRITE, ncid) call get_num_parcels(ncid, n_total_parcels) @@ -597,4 +522,214 @@ subroutine read_chunk(first, last, pfirst) endif end subroutine read_chunk + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + subroutine set_netcdf_parcel_output + integer :: n + logical :: l_enabled_restart = .true. + + call set_netcdf_parcel_info + + ! check custom tags + if (any('all' == output%parcel_list(:))) then + nc_dset(:)%l_enabled = .true. + else if (any('default' == output%parcel_list(:))) then + nc_dset(NC_X_POS)%l_enabled = .true. + nc_dset(NC_Y_POS)%l_enabled = .true. + nc_dset(NC_Z_POS)%l_enabled = .true. + nc_dset(NC_X_VOR)%l_enabled = .true. + nc_dset(NC_Y_VOR)%l_enabled = .true. + nc_dset(NC_Z_VOR)%l_enabled = .true. + nc_dset(NC_BUOY)%l_enabled = .true. +#ifndef ENABLE_DRY_MODE + nc_dset(NC_HUM)%l_enabled = .true. +#endif + nc_dset(NC_VOL)%l_enabled = .true. + nc_dset(NC_START)%l_enabled = .true. + nc_dset(NC_B11)%l_enabled = .true. + nc_dset(NC_B12)%l_enabled = .true. + nc_dset(NC_B13)%l_enabled = .true. + nc_dset(NC_B22)%l_enabled = .true. 
+ nc_dset(NC_B23)%l_enabled = .true. + else + ! check individual fields + do n = 1, size(nc_dset) + nc_dset(n)%l_enabled = any(nc_dset(n)%name == output%parcel_list(:)) + enddo + endif + + if (count(nc_dset(:)%l_enabled) == 0) then + if ((world%rank == world%root) .and. l_first_write) then + print *, "WARNING: No parcel attributes are actively selected. EPIC is going to write" + print *, " the default parcel attributes. Stop the simulation now if this is" + print *, " not your intention. Parcel attributes can be provided to the list" + print *, " 'output%parcel_list' in the configuration file." + print *, " The following parcel attributes are available:" + do n = 1, size(nc_dset) + print *, " " // nc_dset(n)%name // " : " // trim(nc_dset(n)%long_name) + enddo + print *, " " // "all" // repeat(" ", 29) // " : write all parcel attributes" + print *, " " // "default" // repeat(" ", 25) // " : write default parcel attributes" + print *, "" + endif + nc_dset(NC_START)%l_enabled = .true. + nc_dset(NC_X_POS)%l_enabled = .true. + nc_dset(NC_Y_POS)%l_enabled = .true. + nc_dset(NC_Z_POS)%l_enabled = .true. + nc_dset(NC_X_VOR)%l_enabled = .true. + nc_dset(NC_Y_VOR)%l_enabled = .true. + nc_dset(NC_Z_VOR)%l_enabled = .true. + nc_dset(NC_BUOY)%l_enabled = .true. +#ifndef ENABLE_DRY_MODE + nc_dset(NC_HUM)%l_enabled = .true. +#endif + nc_dset(NC_VOL)%l_enabled = .true. + nc_dset(NC_B11)%l_enabled = .true. + nc_dset(NC_B12)%l_enabled = .true. + nc_dset(NC_B13)%l_enabled = .true. + nc_dset(NC_B22)%l_enabled = .true. + nc_dset(NC_B23)%l_enabled = .true. + endif + +#ifdef ENABLE_VERBOSE + if (verbose .and. (world%rank == world%root) .and. l_first_write) then + print *, "EPIC is going to write the following parcel attributes:" + do n = 1, size(nc_dset) + if (nc_dset(n)%l_enabled) then + print *, repeat(" ", 4) // trim(nc_dset(n)%name) + endif + enddo + print *, "" + endif +#endif + + + if (l_first_write) then + l_first_write = .false. 
+ + do n = NC_X_POS, NC_Y_POS + l_enabled_restart = (l_enabled_restart .and. nc_dset(n)%l_enabled) + enddo + + do n = NC_B11, NC_B23 + l_enabled_restart = (l_enabled_restart .and. nc_dset(n)%l_enabled) + enddo + l_enabled_restart = (l_enabled_restart .and. nc_dset(NC_VOL)%l_enabled) + + l_enabled_restart = (l_enabled_restart .and. & + ((nc_dset(NC_X_VOR)%l_enabled .and. & + nc_dset(NC_Y_VOR)%l_enabled .and. & + nc_dset(NC_Z_VOR)%l_enabled) .or. & + nc_dset(NC_BUOY)%l_enabled)) + + + if ((.not. l_enabled_restart) .and. (world%rank == world%root)) then + print *, "WARNING: EPIC will not be able to restart from a parcel file." + print *, " You must at least write the B-shape matrix, parcel position" + print *, " parcel volume and parcel vorticity or buoyancy to enable a" + print *, " restart. If you intend to restart from a parcel file later," + print *, " you must stop the simulation immediately. Furthermore, you can" + print *, " write the MPI 'start_index' to speed up the restart." + endif + endif + + end subroutine set_netcdf_parcel_output + + !:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + + subroutine set_netcdf_parcel_info + + nc_dset(NC_X_POS) = netcdf_info(name='x_position', & + long_name='x position component', & + std_name='', & + unit='m', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Y_POS) = netcdf_info(name='y_position', & + long_name='y position component', & + std_name='', & + unit='m', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Z_POS) = netcdf_info(name='z_position', & + long_name='z position component', & + std_name='', & + unit='m', & + dtype=NF90_DOUBLE) + + nc_dset(NC_START) = netcdf_info(name='start_index', & + long_name='MPI rank start index', & + std_name='', & + unit='1', & + dtype=NF90_INT) + + nc_dset(NC_B11) = netcdf_info(name='B11', & + long_name='B11 element of shape matrix', & + std_name='', & + unit='m^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_B12) = netcdf_info(name='B12', & + long_name='B12 element of shape matrix', & + 
std_name='', & + unit='m^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_B13) = netcdf_info(name='B13', & + long_name='B13 element of shape matrix', & + std_name='', & + unit='m^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_B22) = netcdf_info(name='B22', & + long_name='B22 element of shape matrix', & + std_name='', & + unit='m^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_B23) = netcdf_info(name='B23', & + long_name='B23 element of shape matrix', & + std_name='', & + unit='m^2', & + dtype=NF90_DOUBLE) + + nc_dset(NC_VOL) = netcdf_info(name='volume', & + long_name='parcel volume', & + std_name='', & + unit='m^3', & + dtype=NF90_DOUBLE) + + nc_dset(NC_X_VOR) = netcdf_info(name='x_vorticity', & + long_name='x vorticity component', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Y_VOR) = netcdf_info(name='y_vorticity', & + long_name='y vorticity component', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_Z_VOR) = netcdf_info(name='z_vorticity', & + long_name='z vorticity component', & + std_name='', & + unit='1/s', & + dtype=NF90_DOUBLE) + + nc_dset(NC_BUOY) = netcdf_info(name='buoyancy', & + long_name='parcel buoyancy', & + std_name='', & + unit='m/s^2', & + dtype=NF90_DOUBLE) + +#ifndef ENABLE_DRY_MODE + nc_dset(NC_HUM) = netcdf_info(name='humidity', & + long_name='parcel humidity', & + std_name='', & + unit='1', & + dtype=NF90_DOUBLE) +#endif + end subroutine set_netcdf_parcel_info + end module parcel_netcdf diff --git a/src/3d/stepper/ls_rk.f90 b/src/3d/stepper/ls_rk.f90 index 8605c00dc..7ead8fd35 100644 --- a/src/3d/stepper/ls_rk.f90 +++ b/src/3d/stepper/ls_rk.f90 @@ -11,6 +11,7 @@ module ls_rk use rk_utils, only: get_dBdt, get_time_step use utils, only : write_step use parcel_interpl, only : par2grid, grid2par + use parcel_damping, only : parcel_damp use fields, only : velgradg, velog, vortg, vtend, tbuoyg use inversion_mod, only : vor2vel, vorticity_tendency use parcel_diagnostics, only : calculate_parcel_diagnostics @@ -123,6 
+124,8 @@ subroutine ls_rk_step(t) call apply_parcel_reflective_bc call stop_timer(rk_timer) + call parcel_damp(dt) + ! we need to subtract 14 calls since we start and stop ! the timer multiple times which increments n_calls timings(rk_timer)%n_calls = timings(rk_timer)%n_calls - (3 * n_stages - 1) @@ -150,7 +153,6 @@ subroutine ls_rk_substep(dt, step) do n = 1, n_parcels parcels%delta_b(:, n) = get_dBdt(parcels%B(:, n), & parcels%strain(:, n), & - parcels%vorticity(:, n), & parcels%volume(n)) enddo !$omp end parallel do @@ -170,7 +172,6 @@ subroutine ls_rk_substep(dt, step) parcels%delta_b(:, n) = parcels%delta_b(:, n) & + get_dBdt(parcels%B(:, n), & parcels%strain(:, n), & - parcels%vorticity(:, n), & parcels%volume(n)) enddo !$omp end parallel do @@ -211,10 +212,10 @@ subroutine ls_rk_substep(dt, step) call parcel_communicate - call par2grid - call stop_timer(rk_timer) + call par2grid + end subroutine ls_rk_substep end module ls_rk diff --git a/src/3d/stepper/rk_utils.f90 b/src/3d/stepper/rk_utils.f90 index d75d9128c..6c61fd8d6 100644 --- a/src/3d/stepper/rk_utils.f90 +++ b/src/3d/stepper/rk_utils.f90 @@ -1,7 +1,7 @@ module rk_utils use dimensions, only : n_dim, I_X, I_Y, I_Z use parcel_ellipsoid, only : get_B33, I_B11, I_B12, I_B13, I_B22, I_B23 - use fields, only : velgradg, tbuoyg, vortg, I_DUDX, I_DUDY, I_DVDY, I_DWDX, I_DWDY + use fields, only : velgradg, tbuoyg, vortg, I_DUDX, I_DUDY, I_DUDZ, I_DVDX, I_DVDY, I_DVDZ, I_DWDX, I_DWDY, strain_mag use field_mpi, only : field_halo_fill_scalar use constants, only : zero, one, two, f12 use parameters, only : nx, ny, nz, dxi, vcell @@ -26,22 +26,12 @@ module rk_utils ! @param[in] vorticity of parcel ! @param[in] volume is the parcel volume ! 
@returns dB/dt in Bout - function get_dBdt(Bin, S, vorticity, volume) result(Bout) + function get_dBdt(Bin, S, volume) result(Bout) double precision, intent(in) :: Bin(I_B23) - double precision, intent(in) :: S(5) - double precision, intent(in) :: vorticity(n_dim) + double precision, intent(in) :: S(8) double precision, intent(in) :: volume double precision :: Bout(5), B33 - double precision :: dudz, dvdx, dvdz, dwdz - - ! du/dz = \eta + dw/dx - dudz = vorticity(I_Y) + S(I_DWDX) - - ! dv/dx \zeta + du/dy - dvdx = vorticity(I_Z) + S(I_DUDY) - - ! dv/dz = dw/dy - \xi - dvdz = S(I_DWDY) - vorticity(I_X) + double precision :: dwdz ! dw/dz = - (du/dx + dv/dy) dwdz = - (S(I_DUDX) + S(I_DVDY)) @@ -49,33 +39,72 @@ function get_dBdt(Bin, S, vorticity, volume) result(Bout) B33 = get_B33(Bin, volume) ! dB11/dt = 2 * (du/dx * B11 + du/dy * B12 + du/dz * B13) - Bout(I_B11) = two * (S(I_DUDX) * Bin(I_B11) + S(I_DUDY) * Bin(I_B12) + dudz * Bin(I_B13)) + Bout(I_B11) = two * (S(I_DUDX) * Bin(I_B11) + S(I_DUDY) * Bin(I_B12) + S(I_DUDZ) * Bin(I_B13)) ! dB12/dt = - Bout(I_B12) = dvdx * Bin(I_B11) & ! dv/dx * B11 + Bout(I_B12) = S(I_DVDX) * Bin(I_B11) & ! dv/dx * B11 - dwdz * Bin(I_B12) & ! - dw/dz * B12 - + dvdz * Bin(I_B13) & ! + dv/dz * B13 + + S(I_DVDZ) * Bin(I_B13) & ! + dv/dz * B13 + S(I_DUDY) * Bin(I_B22) & ! + du/dy * B22 - + dudz * Bin(I_B23) ! + du/dz * B23 + + S(I_DUDZ) * Bin(I_B23) ! + du/dz * B23 ! dB13/dt = Bout(I_B13) = S(I_DWDX) * Bin(I_B11) & ! dw/dx * B11 + S(I_DWDY) * Bin(I_B12) & ! + dw/dy * B12 - S(I_DVDY) * Bin(I_B13) & ! - dv/dy * B13 + S(I_DUDY) * Bin(I_B23) & ! + du/dy * B23 - + dudz * B33 ! + du/dz * B33 + + S(I_DUDZ) * B33 ! + du/dz * B33 ! dB22/dt = 2 * (dv/dx * B12 + dv/dy * B22 + dv/dz * B23) - Bout(I_B22) = two * (dvdx * Bin(I_B12) + S(I_DVDY) * Bin(I_B22) + dvdz * Bin(I_B23)) + Bout(I_B22) = two * (S(I_DVDX) * Bin(I_B12) + S(I_DVDY) * Bin(I_B22) + S(I_DVDZ) * Bin(I_B23)) ! dB23/dt = Bout(I_B23) = S(I_DWDX) * Bin(I_B12) & ! 
dw/dx * B12 - + dvdx * Bin(I_B13) & ! + dv/dx * B13 + + S(I_DVDX) * Bin(I_B13) & ! + dv/dx * B13 + S(I_DWDY) * Bin(I_B22) & ! + dw/dy * B22 - S(I_DUDX) * Bin(I_B23) & ! - du/dx * B23 - + dvdz * B33 ! + dv/dz * B33 + + S(I_DVDZ) * B33 ! + dv/dz * B33 end function get_dBdt + ! Calculate velocity strain + ! @param[in] velocity gradient tensor at grid point + ! @param[in] vorticity at grid point + ! @returns 3x3 strain matrix + function get_strain(velgradgp) result(strain) + double precision, intent(in) :: velgradgp(8) + double precision :: strain(3,3) + + ! get local symmetrised strain matrix, i.e. 1/ 2 * (S + S^T) + ! where + ! /u_x u_y u_z\ + ! S = |v_x v_y v_z| + ! \w_x w_y w_z/ + ! with u_* = du/d* (also derivatives of v and w). + ! dw/dz is obtained from incompressibility + ! dw/dz = - (du/dx + dv/dy) + ! + ! / 2 * u_x u_y + v_x u_z + w_x\ + ! 1/2 * (S + S^T) = 1/2 * |u_y + v_x 2 * v_y v_z + w_y| + ! \u_z + w_x v_z + w_y 2 * w_z/ + ! + ! S11 = du/dx + ! S12 = 1/2 * (du/dy + dv/dx) + ! S13 = 1/2 * (du/dz + dw/dx) + ! S22 = dv/dy + ! S23 = 1/2 * (dv/dz + dw/dy) + ! S33 = dw/dz = - (du/dx + dv/dy) + + strain(1, 1) = velgradgp(I_DUDX) ! S11 + strain(1, 2) = f12 * (velgradgp(I_DUDY) + velgradgp(I_DVDX)) ! S12 + strain(1, 3) = f12 * (velgradgp(I_DUDZ) + velgradgp(I_DWDX)) ! S13 + strain(2, 1) = strain(1, 2) + strain(2, 2) = velgradgp(I_DVDY) ! S22 + strain(2, 3) = f12 * (velgradgp(I_DVDZ) + velgradgp(I_DWDY)) ! S23 + strain(3, 1) = strain(1, 3) + strain(3, 2) = strain(2, 3) + strain(3, 3) = -(velgradgp(I_DUDX) + velgradgp(I_DVDY)) ! S33 + end function get_strain + ! Estimate a suitable time step based on the velocity strain ! and buoyancy gradient. ! @param[in] t is the time @@ -102,40 +131,7 @@ function get_time_step(t) result(dt) do ix = box%lo(1), box%hi(1) do iy = box%lo(2), box%hi(2) do iz = 0, nz - ! get local symmetrised strain matrix, i.e. 1/ 2 * (S + S^T) - ! where - ! /u_x u_y u_z\ - ! S = |v_x v_y v_z| - ! \w_x w_y w_z/ - ! 
with u_* = du/d* (also derivatives of v and w). - ! The derivatives dv/dx, du/dz, dv/dz and dw/dz are calculated - ! with vorticity or the assumption of incompressibility - ! (du/dx + dv/dy + dw/dz = 0): - ! dv/dx = \zeta + du/dy - ! du/dz = \eta + dw/dx - ! dv/dz = dw/dy - \xi - ! dw/dz = - (du/dx + dv/dy) - ! - ! / 2 * u_x u_y + v_x u_z + w_x\ - ! 1/2 * (S + S^T) = 1/2 * |u_y + v_x 2 * v_y v_z + w_y| - ! \u_z + w_x v_z + w_y 2 * w_z/ - ! - ! S11 = du/dx - ! S12 = 1/2 * (du/dy + dv/dx) = 1/2 * (2 * du/dy + \zeta) = du/dy + 1/2 * \zeta - ! S13 = 1/2 * (du/dz + dw/dx) = 1/2 * (\eta + 2 * dw/dx) = 1/2 * \eta + dw/dx - ! S22 = dv/dy - ! S23 = 1/2 * (dv/dz + dw/dy) = 1/2 * (2 * dw/dy - \xi) = dw/dy - 1/2 * \xi - ! S33 = dw/dz = - (du/dx + dv/dy) - strain(1, 1) = velgradg(iz, iy, ix, I_DUDX) ! S11 - strain(1, 2) = velgradg(iz, iy, ix, I_DUDY) + f12 * vortg(iz, iy, ix, I_Z) ! S12 - strain(1, 3) = velgradg(iz, iy, ix, I_DWDX) + f12 * vortg(iz, iy, ix, I_Y) ! S13 - strain(2, 1) = strain(1, 2) - strain(2, 2) = velgradg(iz, iy, ix, I_DVDY) ! S22 - strain(2, 3) = velgradg(iz, iy, ix, I_DWDY) - f12 * vortg(iz, iy, ix, I_X) ! S23 - strain(3, 1) = strain(1, 3) - strain(3, 2) = strain(2, 3) - strain(3, 3) = -(velgradg(iz, iy, ix, I_DUDX) + velgradg(iz, iy, ix, I_DVDY)) ! S33 - + strain = get_strain(velgradg(iz, iy, ix,:)) ! calculate its eigenvalues. The Jacobi solver ! requires the upper triangular matrix only. call scherzinger_eigenvalues(strain, D) @@ -166,7 +162,7 @@ function get_time_step(t) result(dt) db2 = db2 + gradb ** 2 ! db/dz - gradb = f12 * dxi(I_Z) * (tbuoyg(1:nz+1, box%lo(2):box%hi(2), box%lo(1):box%hi(1)) & + gradb = f12 * dxi(I_Z) * (tbuoyg(1:nz+1, box%lo(2):box%hi(2), box%lo(1):box%hi(1)) & - tbuoyg(-1:nz-1, box%lo(2):box%hi(2), box%lo(1):box%hi(1))) #ifdef ENABLE_BUOYANCY_PERTURBATION_MODE @@ -219,4 +215,35 @@ function get_time_step(t) result(dt) endif end function get_time_step + ! 
@returns the strain magnitude for use in damping routine + subroutine get_strain_magnitude_field + double precision :: strain(n_dim, n_dim) + integer :: ix, iy, iz + + do ix = box%lo(1), box%hi(1) + do iy = box%lo(2), box%hi(2) + do iz = 0, nz + strain = get_strain(velgradg(iz, iy, ix,:)) + strain_mag(iz, iy, ix) = sqrt(two * (strain(1, 1) * strain(1, 1) +& + strain(1, 2) * strain(1, 2) +& + strain(1, 3) * strain(1, 3) +& + strain(2, 1) * strain(2, 1) +& + strain(2, 2) * strain(2, 2) +& + strain(2, 3) * strain(2, 3) +& + strain(3, 1) * strain(3, 1) +& + strain(3, 2) * strain(3, 2) +& + strain(3, 3) * strain(3, 3) )) + enddo + ! Reflect beyond boundaries to ensure damping is conservative + ! This is because the points below the surface contribute to the level above + strain_mag(-1, iy, ix) = strain_mag(1, iy, ix) + strain_mag(nz+1, iy, ix) = strain_mag(nz-1, iy, ix) + enddo + enddo + + ! We need this halo fill to obtain good conservation + call field_halo_fill_scalar(strain_mag, l_alloc=.true.) + + end subroutine get_strain_magnitude_field + end module rk_utils diff --git a/src/3d/utils/options.f90 b/src/3d/utils/options.f90 index dc808c0e2..05655511d 100644 --- a/src/3d/utils/options.f90 +++ b/src/3d/utils/options.f90 @@ -62,6 +62,7 @@ module options double precision :: parcel_freq = one logical :: overwrite = .false. logical :: write_parcels = .true. + character(len=32), dimension(128) :: parcel_list = '' double precision :: parcel_stats_freq = one logical :: write_parcel_stats = .true. double precision :: field_stats_freq = one @@ -106,6 +107,17 @@ module options type(time_info_type) :: time + ! damping model + type damping_info_type + double precision :: vorticity_prefactor = 1.0d0 ! constant in damping implementation for vorticity + double precision :: scalars_prefactor = 1.0d0 ! constant in damping implementation for scalars + logical :: l_vorticity = .false. ! use damping on vorticity + logical :: l_scalars = .false. ! 
use damping on scalars + end type damping_info_type + + type(damping_info_type) :: damping + + contains ! parse configuration file ! (see https://cyber.dabamos.de/programming/modernfortran/namelists.html [8 March 2021]) @@ -115,7 +127,7 @@ subroutine read_config_file logical :: exists = .false. ! namelist definitions - namelist /EPIC/ field_file, flux_file, rk_order, boundary, output, parcel, time + namelist /EPIC/ field_file, flux_file, rk_order, boundary, output, parcel, time, damping ! check whether file exists inquire(file=filename, exist=exists) @@ -188,6 +200,11 @@ subroutine write_netcdf_options(ncid) call write_netcdf_attribute(ncid, "initial", time%initial) call write_netcdf_attribute(ncid, "precise_stop", time%precise_stop) call write_netcdf_attribute(ncid, "alpha", time%alpha) + + call write_netcdf_attribute(ncid, "damping_vorticity_prefactor", damping%vorticity_prefactor) + call write_netcdf_attribute(ncid, "damping_scalars_prefactor", damping%scalars_prefactor) + call write_netcdf_attribute(ncid, "damping_l_vorticity", damping%l_vorticity) + call write_netcdf_attribute(ncid, "damping_l_scalars", damping%l_scalars) end subroutine write_netcdf_options diff --git a/src/3d/utils/utils.f90 b/src/3d/utils/utils.f90 index db49290e4..0f1b1193c 100644 --- a/src/3d/utils/utils.f90 +++ b/src/3d/utils/utils.f90 @@ -21,8 +21,11 @@ module utils use parcel_interpl, only : par2grid, grid2par use netcdf_reader, only : get_file_type, get_num_steps, get_time, get_netcdf_box use parameters, only : lower, extent, update_parameters, read_zeta_boundary_flag & - , set_zeta_boundary_flag + , set_zeta_boundary_flag, ny use bndry_fluxes, only : read_bndry_fluxes + use netcdf_utils, only : NF90_NOWRITE & + , close_netcdf_file & + , open_netcdf_file use physics, only : read_physical_quantities & #ifdef ENABLE_BUOYANCY_PERTURBATION_MODE , calculate_basic_reference_state & @@ -237,7 +240,7 @@ subroutine setup_fields_and_parcels endif call read_bndry_fluxes(trim(flux_file)) - + 
#ifdef ENABLE_BUOYANCY_PERTURBATION_MODE ! Calculate the squared buoyancy frequency if not provided. call calculate_basic_reference_state(nx, ny, nz, extent(3), tbuoyg) diff --git a/src/netcdf/netcdf_reader.f90 b/src/netcdf/netcdf_reader.f90 index c65cef39f..cad2ea4a7 100644 --- a/src/netcdf/netcdf_reader.f90 +++ b/src/netcdf/netcdf_reader.f90 @@ -150,9 +150,11 @@ function has_dataset(ncid, name) result(link_exists) link_exists = (ncerr == nf90_noerr) - ncerr = nf90_inquire_variable(ncid, varid) + if (link_exists) then + ncerr = nf90_inquire_variable(ncid, varid) + link_exists = (ncerr == nf90_noerr) + endif - link_exists = (link_exists .and. (ncerr == nf90_noerr)) ncerr = 0 end function has_dataset diff --git a/src/netcdf/netcdf_utils.f90 b/src/netcdf/netcdf_utils.f90 index 4879554ab..773f63638 100644 --- a/src/netcdf/netcdf_utils.f90 +++ b/src/netcdf/netcdf_utils.f90 @@ -18,6 +18,16 @@ module netcdf_utils character(*), parameter :: version_name = package // '_version' + type netcdf_info + character(32) :: name = '' + character(128) :: long_name = '' + character(128) :: std_name = '' + character(16) :: unit = '' + integer :: dtype = -1 + integer :: varid = -1 + logical :: l_enabled = .false. + end type netcdf_info + contains ! This subroutine takes an array of length 3 or 4 diff --git a/unit-tests/3d/test_mpi_parcel_read.f90 b/unit-tests/3d/test_mpi_parcel_read.f90 index 4cb7dc505..3d784d229 100644 --- a/unit-tests/3d/test_mpi_parcel_read.f90 +++ b/unit-tests/3d/test_mpi_parcel_read.f90 @@ -9,13 +9,14 @@ program test_mpi_parcel_read use parcel_container use parameters, only : set_max_num_parcels use parcel_netcdf + use netcdf_utils use mpi_environment use mpi_timer implicit none logical :: passed = .true. 
double precision :: res -    integer :: n, start_index, n_parcels_before +    integer :: n, start_index, n_parcels_before call set_max_num_parcels(100000000) diff --git a/unit-tests/3d/test_mpi_parcel_read_rejection.f90 b/unit-tests/3d/test_mpi_parcel_read_rejection.f90 index 6033e139b..67a6b1370 100644 --- a/unit-tests/3d/test_mpi_parcel_read_rejection.f90 +++ b/unit-tests/3d/test_mpi_parcel_read_rejection.f90 @@ -7,12 +7,17 @@ ! ============================================================================= program test_mpi_parcel_read_rejection use unit_test -    use options, only : parcel -    use constants, only : zero, f12 +    use options, only : parcel, write_netcdf_options +    use constants, only : one, zero, f12 use parcel_container use parcel_netcdf use mpi_environment use mpi_layout +    use netcdf_utils +    use netcdf_writer +    use physics, only : write_physical_quantities +    use config, only : package_version, cf_version +    use iomanip, only : zfill use parameters, only : lower, update_parameters, extent, nx, ny, nz, dx, max_num_parcels use mpi_timer implicit none diff --git a/unit-tests/3d/test_mpi_parcel_write.f90 b/unit-tests/3d/test_mpi_parcel_write.f90 index c341fb46a..a654c9418 100644 --- a/unit-tests/3d/test_mpi_parcel_write.f90 +++ b/unit-tests/3d/test_mpi_parcel_write.f90 @@ -1,4 +1,4 @@ -! ============================================================================= +! ============================================================================= ! Test parallel parcel writing ! ! This unit test checks the writing of parcels in parallel.
@@ -8,6 +8,7 @@ program test_mpi_parcel_write use constants, only : zero use parcel_container use parcel_netcdf + use netcdf_utils use mpi_environment use mpi_timer implicit none diff --git a/unit-tests/3d/test_mpi_vel2vgrad.f90 b/unit-tests/3d/test_mpi_vel2vgrad.f90 index 9ce499e3b..3f6e41be0 100644 --- a/unit-tests/3d/test_mpi_vel2vgrad.f90 +++ b/unit-tests/3d/test_mpi_vel2vgrad.f90 @@ -32,7 +32,9 @@ program test_mpi_vel2vgrad use unit_test use constants, only : zero, one, two, four, pi, twopi use parameters, only : lower, update_parameters, dx, nx, ny, nz, extent - use fields, only : vortg, velog, velgradg, field_default + use fields, only : vortg, velog, velgradg, field_default, I_DUDX, I_DUDY, I_DUDZ, I_DVDX, I_DVDY, I_DVDZ, I_DWDX, I_DWDY + use dimensions, only : I_X, I_Y, I_Z + use parcel_ellipsoid, only : get_B33, I_B11, I_B12, I_B13, I_B22, I_B23 use inversion_utils, only : init_inversion, finalise_inversion use inversion_mod, only : vel2vgrad use sta3dfft, only : fftxyp2s @@ -70,7 +72,7 @@ program test_mpi_vel2vgrad call field_default - allocate(strain(-1:nz+1, box%hlo(2):box%hhi(2), box%hlo(1):box%hhi(1), 5)) + allocate(strain(-1:nz+1, box%hlo(2):box%hhi(2), box%hlo(1):box%hhi(1), 8)) allocate(svelog(0:nz, box%lo(2):box%hi(2), box%lo(1):box%hi(1), 3)) call init_inversion @@ -83,21 +85,25 @@ program test_mpi_vel2vgrad z = lower(3) + iz * dx(3) ! velocity - velog(iz, iy, ix, 1) = AA * dcos(a * x) * dsin(b * y) * dsin(c * z) - velog(iz, iy, ix, 2) = BB * dsin(a * x) * dcos(b * y) * dsin(c * z) - velog(iz, iy, ix, 3) = CC * dsin(a * x) * dsin(b * y) * dcos(c * z) + velog(iz, iy, ix, I_X) = AA * dcos(a * x) * dsin(b * y) * dsin(c * z) + velog(iz, iy, ix, I_Y) = BB * dsin(a * x) * dcos(b * y) * dsin(c * z) + velog(iz, iy, ix, I_Z) = CC * dsin(a * x) * dsin(b * y) * dcos(c * z) ! 
vorticity - vortg(iz, iy, ix, 1) = (b * CC - c * BB) * dsin(a * x) * dcos(b * y) * dcos(c * z) - vortg(iz, iy, ix, 2) = (c * AA - a * CC) * dcos(a * x) * dsin(b * y) * dcos(c * z) - vortg(iz, iy, ix, 3) = (a * BB - b * AA) * dcos(a * x) * dcos(b * y) * dsin(c * z) + vortg(iz, iy, ix, I_X) = (b * CC - c * BB) * dsin(a * x) * dcos(b * y) * dcos(c * z) + vortg(iz, iy, ix, I_Y) = (c * AA - a * CC) * dcos(a * x) * dsin(b * y) * dcos(c * z) + vortg(iz, iy, ix, I_Z) = (a * BB - b * AA) * dcos(a * x) * dcos(b * y) * dsin(c * z) ! velocity gradient tensor (reference solution) - strain(iz, iy, ix, 1) = - a * AA * dsin(a * x) * dsin(b * y) * dsin(c * z) ! du/dx - strain(iz, iy, ix, 2) = b * AA * dcos(a * x) * dcos(b * y) * dsin(c * z) ! du/dy - strain(iz, iy, ix, 3) = - b * BB * dsin(a * x) * dsin(b * y) * dsin(c * z) ! dv/dy - strain(iz, iy, ix, 4) = a * CC * dcos(a * x) * dsin(b * y) * dcos(c * z) ! dw/dx - strain(iz, iy, ix, 5) = b * CC * dsin(a * x) * dcos(b * y) * dcos(c * z) ! dw/dy + strain(iz, iy, ix, I_DUDX) = - a * AA * dsin(a * x) * dsin(b * y) * dsin(c * z) ! du/dx + strain(iz, iy, ix, I_DUDY) = b * AA * dcos(a * x) * dcos(b * y) * dsin(c * z) ! du/dy + strain(iz, iy, ix, I_DVDY) = - b * BB * dsin(a * x) * dsin(b * y) * dsin(c * z) ! dv/dy + strain(iz, iy, ix, I_DWDX) = a * CC * dcos(a * x) * dsin(b * y) * dcos(c * z) ! dw/dx + strain(iz, iy, ix, I_DWDY) = b * CC * dsin(a * x) * dcos(b * y) * dcos(c * z) ! dw/dy + strain(iz, iy, ix, I_DUDZ) = strain(iz, iy, ix, I_DWDX) + vortg(iz, iy, ix, I_Y) + strain(iz, iy, ix, I_DVDX) = strain(iz, iy, ix, I_DUDY) + vortg(iz, iy, ix, I_Z) + strain(iz, iy, ix, I_DVDZ) = strain(iz, iy, ix, I_DWDY) - vortg(iz, iy, ix, I_X) + enddo enddo enddo