Ten diffusive domains with SCHISM boundary (#573)
* f90 files

* step2

* run all ten diffusive domains in serial with schism depth boundary data

* code update 6/29/2022

* run all ten diffusive domains in serial with schism boundary

* more clean up

* comment out unused maxCourant variable

* removed private file paths

* further cleanup

* change dt_schisma and dt_db_g and cleanup

* remove coastal_domain_testing.yaml

* remove old files in LowerColorado_TX folder

* remove 2 more old files in LowerColorado_TX folder

* updated hydrofabric & natural xsec files

* remove private path in yaml

* change default value of diffusivity limits

* remove list()

* list() removed

* remove two natural xsec nc files

* updated mask file only for Lower Colorado River

* remove data of HurricaneLaura

Co-authored-by: Dong Kim <[email protected]>
kumdonoaa and Dong Kim authored Jul 18, 2022
1 parent ca36a2e commit 0c9ec38
Showing 21 changed files with 13,261 additions and 1,685 deletions.
208 changes: 112 additions & 96 deletions src/kernel/diffusive/diffusive.f90

Large diffs are not rendered by default.

116 changes: 91 additions & 25 deletions src/troute-network/troute/nhd_io.py
@@ -15,6 +15,8 @@
import netCDF4
from joblib import delayed, Parallel
from cftime import date2num
import dateutil.parser as dparser
from datetime import datetime, timedelta

from troute.nhd_network import reverse_dict
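
The new dateutil import is used further down to pull the simulation start date out of the netCDF time-units string. A minimal sketch of that fuzzy parse (the units string here is illustrative, not from a real file):

```
import dateutil.parser as dparser

# CF-style units attribute as found on a SCHISM 'time' variable (illustrative)
units = "seconds since 2022-06-29 00:00:00"
start_date = dparser.parse(units, fuzzy=True)  # fuzzy=True skips "seconds since"
print(start_date)  # 2022-06-29 00:00:00
```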

@@ -78,7 +80,7 @@ def read_config_file(custom_input_file):
compute_parameters (dict): Input parameters re computation settings
forcing_parameters (dict): Input parameters re model forcings
restart_parameters (dict): Input parameters re model restart
diffusive_parameters (dict): Input parameters re diffusive wave model
hybrid_parameters (dict): Input parameters re diffusive wave model
output_parameters (dict): Input parameters re output writing
parity_parameters (dict): Input parameters re parity assessment
data_assimilation_parameters (dict): Input parameters re data assimilation
@@ -106,7 +108,7 @@ def read_config_file(custom_input_file):
compute_parameters = data.get("compute_parameters", {})
forcing_parameters = compute_parameters.get("forcing_parameters", {})
restart_parameters = compute_parameters.get("restart_parameters", {})
diffusive_parameters = compute_parameters.get("diffusive_parameters", {})
hybrid_parameters = compute_parameters.get("hybrid_parameters", {})
data_assimilation_parameters = compute_parameters.get(
"data_assimilation_parameters", {}
)
@@ -121,7 +123,7 @@
compute_parameters,
forcing_parameters,
restart_parameters,
diffusive_parameters,
hybrid_parameters,
output_parameters,
parity_parameters,
data_assimilation_parameters,
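
With the rename above, configuration files should nest the diffusive settings under a hybrid_parameters key rather than diffusive_parameters. A hedged sketch of the lookup (key names taken from this diff; the YAML contents are illustrative only, not the canonical schema):

```
import yaml

config_text = """
compute_parameters:
    hybrid_parameters:                # formerly diffusive_parameters
        run_hybrid_routing: True      # illustrative key; consult the repo's sample yamls
"""
data = yaml.load(config_text, Loader=yaml.SafeLoader)
hybrid_parameters = data.get("compute_parameters", {}).get("hybrid_parameters", {})
```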
@@ -150,6 +152,30 @@ def read_diffusive_domain(domain_file):

return data

def read_coastal_boundary_domain(domain_file):
    '''
    Read coastal boundary domain from .yaml or .json file.
    Arguments
    ---------
    domain_file (str or pathlib.Path): Path of coastal boundary domain file
    Returns
    -------
    data (dict, int: int): mapping of diffusive domain tailwater segments
                           to coastal domain segments
    '''
    if domain_file[-4:] == "yaml":
        with open(domain_file) as domain:
            data = yaml.load(domain, Loader=yaml.SafeLoader)
    else:
        with open(domain_file) as domain:
            data = json.load(domain)

    return data
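
For reference, a boundary-domain file is nothing more than a flat mapping from diffusive tailwater segment ID to SCHISM boundary node, so the loader is a bare YAML/JSON parse. A toy round-trip (IDs hypothetical):

```
import yaml

# hypothetical coastal_boundary_domain.yaml contents:
#     3766334: 128      # tailwater segment ID -> coastal boundary node index
#     5781901: 4521
text = "3766334: 128\n5781901: 4521\n"
data = yaml.load(text, Loader=yaml.SafeLoader)
assert data == {3766334: 128, 5781901: 4521}
```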

def read_custom_input(custom_input_file):
if custom_input_file[-4:] == "yaml":
with open(custom_input_file) as custom_file:
@@ -201,7 +227,6 @@ def read_lakeparm(
Reads LAKEPARM file and prepares a dataframe, filtered
to the relevant reservoirs, to provide the parameters
for level-pool reservoir computation.
Completely replaces the read_waterbody_df function from prior versions
of the v02 routing code.
@@ -213,7 +238,6 @@
Returns:
df1 (DataFrame):
"""

# TODO: avoid or parameterize "feature_id" or ... return to name-blind dataframe version
@@ -385,23 +409,17 @@ def get_ql_from_wrf_hydro_mf(
value_col: column/field in the CHRTOUT files with the lateral inflow value
gw_col: column/field in the CHRTOUT files with the groundwater bucket flux value
runoff_col: column/field in the CHRTOUT files with the runoff from terrain routing value
In general the CHRTOUT files contain one value per time step. At present, there is
no capability for handling non-uniform timesteps in the qlaterals.
The qlateral may also be input using a comma-delimited file -- see
`get_ql_from_csv`
Note/Todo:
For later needs, filtering for specific features or times may
be accomplished with one of:
ds.loc[{selectors}]
ds.sel({selectors})
ds.isel({selectors})
Returns from these selection functions are sub-datasets.
For example:
```
(Pdb) ds.sel({"feature_id":[4186117, 4186169],"time":ds.time.values[:2]})['q_lateral'].to_dataframe()
@@ -410,7 +428,6 @@
2018-01-01 13:00:00 4186117 41.233807 -75.413895 0.006496
2018-01-02 00:00:00 4186117 41.233807 -75.413895 0.006460
```
or...
```
(Pdb) ds.sel({"feature_id":[4186117, 4186169],"time":[np.datetime64('2018-01-01T13:00:00')]})['q_lateral'].to_dataframe()
@@ -485,12 +502,20 @@ def write_chanobs(
-------------
'''

    # TODO: the loop and if statement below are a stopgap for gage-location
    # links that are missing from flowveldepth; this should be fixed upstream
    link_na = []
    for link in link_gage_df.index:
        if link not in flowveldepth.index:
            link_na.append(link)
    link_gage_df_nona = link_gage_df.drop(link_na)

    # array of segment linkIDs at gage locations. Results from these segments will be written
    gage_feature_id = link_gage_df.index.to_numpy(dtype = "int64")
    #gage_feature_id = link_gage_df.index.to_numpy(dtype = "int64")
    gage_feature_id = link_gage_df_nona.index.to_numpy(dtype = "int64")

    # array of simulated flow data at gage locations
    gage_flow_data = flowveldepth.loc[link_gage_df.index].iloc[:,::3].to_numpy(dtype="float32")
    # array of simulated flow data at gage locations
    #gage_flow_data = flowveldepth.loc[link_gage_df.index].iloc[:,::3].to_numpy(dtype="float32")
    gage_flow_data = flowveldepth.loc[link_gage_df_nona.index].iloc[:,::3].to_numpy(dtype="float32")

    # array of simulation time
    gage_flow_time = [t0 + timedelta(seconds = (i+1) * dt) for i in range(nts)]
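
The stopgap above drops gage links that never made it into flowveldepth before any .loc indexing. A standalone sketch of the same guard via an index intersection (toy frames, not the real flowveldepth layout):

```
import pandas as pd

link_gage_df = pd.DataFrame(index=pd.Index([10, 20, 30], name="link"))
# toy flowveldepth: link 20 is absent; columns cycle flow, velocity, depth
flowveldepth = pd.DataFrame([[1.0, 0.5, 0.2], [2.0, 0.7, 0.4]],
                            index=pd.Index([10, 30], name="link"))

present = link_gage_df.index.intersection(flowveldepth.index)  # == loop + drop
gage_flow_data = flowveldepth.loc[present].iloc[:, ::3].to_numpy(dtype="float32")
```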
@@ -571,7 +596,6 @@ def write_chanobs(
fill_value = np.nan
)
y[:] = gage_flow_data.T

# =========== GLOBAL ATTRIBUTES ===============
f.setncatts(
{
@@ -743,10 +767,8 @@ def get_ql_from_wrf_hydro(qlat_files, index_col="station_id", value_col="q_later
qlat_files: globbed list of CHRTOUT files containing desired lateral inflows
index_col: column/field in the CHRTOUT files with the segment/link id
value_col: column/field in the CHRTOUT files with the lateral inflow value
In general the CHRTOUT files contain one value per time step. At present, there is
no capability for handling non-uniform timesteps in the qlaterals.
The qlateral may also be input using a comma-delimited file -- see
`get_ql_from_csv`
"""
@@ -943,12 +965,10 @@ def get_usgs_df_from_csv(usgs_csv, routelink_subset_file, index_col="link"):
usgs_csv - csv file with SEGMENT IDs in the left-most column labeled with "link",
and date-headed values from time-slice files in the format
"2018-09-18 00:00:00"
It is assumed that the segment crosswalk and interpolation have both
already been performed, so we do not need to comprehend
the potentially non-numeric byte-strings associated with gage IDs, nor
do we need to interpolate anything here as when we read from the timeslices.
If that were necessary, we might use a solution such as proposed here:
https://stackoverflow.com/a/35058538
note that explicit typing of the index cannot be done on read and
@@ -1221,7 +1241,6 @@ def get_channel_restart_from_wrf_hydro(
default_us_flow_column: name used in remainder of program to refer to this column of the dataset
default_ds_flow_column: name used in remainder of program to refer to this column of the dataset
default_depth_column: name used in remainder of program to refer to this column of the dataset
The Restart file gives hlink, qlink1, and qlink2 values for channels --
the order is simply the same as that found in the Route-Link files.
*Subnote 1*: The order of these values is NOT the order found in the CHRTOUT files,
@@ -1353,7 +1372,6 @@
):
"""
Write t-route flow and depth data to WRF-Hydro restart files.
Arguments
---------
data (Data Frame): t-route simulated flow, velocity and depth data
@@ -1366,7 +1384,6 @@
troute_us_flow_var_name (str):
troute_ds_flow_var_name (str):
troute_depth_var_name (str):
Returns
-------
"""
@@ -1473,7 +1490,6 @@ def get_reservoir_restart_from_wrf_hydro(
waterbody_depth_column: column in the restart file to use for downstream flow initial state
default_waterbody_flow_column: name used in remainder of program to refer to this column of the dataset
default_waterbody_depth_column: name used in remainder of program to refer to this column of the dataset
The Restart file gives qlakeo and resht values for waterbodies.
The order of values in the file is the same as the order in the LAKEPARM file from WRF-Hydro.
However, there are many instances where only a subset of waterbodies described in the lakeparm
@@ -1523,7 +1539,57 @@ def build_coastal_ncdf_dataframe(coastal_ncdf):
coastal_ncdf_df = ds[["elev", "depth"]]
return coastal_ncdf_df.to_dataframe()

def build_coastal_ncdf_dataframe(
    coastal_files,
    coastal_boundary_domain,
):
    # retrieve coastal elevation, topo depth, and temporal data
    ds = netCDF4.Dataset(filename = coastal_files, mode = 'r', format = "NETCDF4")

    tws = list(coastal_boundary_domain.keys())
    coastal_boundary_nodes = list(coastal_boundary_domain.values())

    elev_NAVD88 = ds.variables['elev'][:, coastal_boundary_nodes].filled(fill_value = np.nan)
    depth_bathy = ds.variables['depth'][coastal_boundary_nodes].filled(fill_value = np.nan)
    timesteps = ds.variables['time'][:]
    if len(timesteps) > 1:
        dt_schism = timesteps[1] - timesteps[0]
    else:
        raise RuntimeError("SCHISM provided fewer than 2 time steps")

    start_date = ds.variables['time'].units
    start_date = dparser.parse(start_date, fuzzy=True)
    dt_timeslice = timedelta(minutes=dt_schism/60.0)
    tfin = start_date + dt_timeslice*len(timesteps)
    timestamps = pd.date_range(start_date, tfin, freq=dt_timeslice)
    timestamps = timestamps.strftime('%Y-%m-%d %H:%M:%S')

    # create a dataframe of water depth at coastal domain nodes
    timeslice_schism_list = []
    for t in range(0, len(timesteps)+1):
        timeslice = np.full(len(tws), timestamps[t])
        if t == 0:
            depth = np.nan
        else:
            depth = elev_NAVD88[t-1, :] + depth_bathy

        timeslice_schism = (pd.DataFrame({
                                'stationId' : tws,
                                'datetime'  : timeslice,
                                'depth'     : depth
                            }).
                            set_index(['stationId', 'datetime']).
                            unstack(1, fill_value = np.nan)['depth'])

        timeslice_schism_list.append(timeslice_schism)

    coastal_boundary_depth_df = pd.concat(timeslice_schism_list, axis=1, ignore_index=False)

    # linearly extrapolate depth value at start date
    coastal_boundary_depth_df.iloc[:, 0] = 2.0*coastal_boundary_depth_df.iloc[:, 1] - coastal_boundary_depth_df.iloc[:, 2]

    return coastal_boundary_depth_df
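
The depth series handed to the diffusive boundary is just SCHISM's water-surface elevation plus the static bathymetric depth at each node, with the otherwise-missing start-date column backfilled by linear extrapolation. A self-contained sketch of that arithmetic (toy numbers, assumed hourly output):

```
import numpy as np
import pandas as pd

# toy SCHISM-like inputs: 2 boundary nodes, 2 output timesteps
elev_NAVD88 = np.array([[0.10, 0.20],   # water-surface elevation (m, NAVD88)
                        [0.15, 0.25]])
depth_bathy = np.array([5.0, 7.0])      # static bathymetric depth (m)
depth = elev_NAVD88 + depth_bathy       # total water depth, shape (time, node)

# assemble like the function above: NaN placeholder column at the start date
times = pd.date_range("2022-06-29 00:00", periods=3, freq=pd.Timedelta(hours=1))
df = pd.DataFrame(np.full((2, 3), np.nan),
                  index=pd.Index(["tw1", "tw2"], name="stationId"),
                  columns=times.strftime('%Y-%m-%d %H:%M:%S'))
df.iloc[:, 1:] = depth.T

# linearly extrapolate the start-date depth, mirroring the last step above
df.iloc[:, 0] = 2.0 * df.iloc[:, 1] - df.iloc[:, 2]
```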

def lastobs_df_output(
lastobs_df,
dt,
2 changes: 1 addition & 1 deletion src/troute-network/troute/nhd_network.py
@@ -269,7 +269,7 @@ def reachable_network(N, sources=None, targets=None, check_disjoint=True):

rv = {}
for k, n in reached.items():
rv[k] = {m: N.get(m, []) for m in n}
rv[k] = {m: N.get(m, []) for m in n}
return rv
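
For context, the fixed comprehension builds each reachable sub-network as a {node: connections} dict from the set of reached nodes. A toy run of just that line (connections dict hypothetical):

```
# toy run of the fixed comprehension (connections dict hypothetical)
N = {1: [2, 3], 2: [], 3: [4], 4: []}
reached = {1: {1, 2, 3, 4}}              # nodes reachable from source 1
rv = {k: {m: N.get(m, []) for m in n} for k, n in reached.items()}
assert rv == {1: {1: [2, 3], 2: [], 3: [4], 4: []}}
```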


3 changes: 1 addition & 2 deletions src/troute-network/troute/nhd_network_utilities_v02.py
@@ -845,7 +845,7 @@ def build_refac_connections(diff_network_parameters):

# set parameter dataframe index as segment id number, sort
param_df = param_df.set_index("key").sort_index()

# There can be an externally determined terminal code -- that's this first value
terminal_codes = set()
terminal_codes.update(terminal_code)
@@ -865,4 +865,3 @@
)

return connections
