10 changes: 6 additions & 4 deletions demeter/change/expansion.py
@@ -46,7 +46,8 @@ def extense_parallel_helper(regix_metix, log, c, allregnumber, allregmet, spat_l
     # transitions[reg_met_mask, :, :] += trans_mat

     # calculate non-achieved change
-    transitions[reg_met_mask, :, :] += trans_mat
+    if transitions.size != 1:
+        transitions[reg_met_mask, :, :] += trans_mat

     non_chg = np.sum(abs(target_change[:, :, :])) / 2.
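The new guard only folds `trans_mat` into the accumulator when a full transition cube was actually allocated; a 1-element placeholder (see the `trans_mat = np.zeros(shape=1)` change further down) signals that transition tracking is switched off. A minimal sketch of the pattern, with hypothetical shapes:

```python
import numpy as np

# hypothetical: 4 region-metric rows, 3 x 3 PFT transition matrices
transitions = np.zeros((4, 3, 3))
reg_met_mask = np.array([True, False, True, False])
trans_mat = np.ones((2, 3, 3))  # per-region transitions for the masked rows

# skip accumulation unless a full cube was allocated; a 1-element
# placeholder array means transition tracking is off
if transitions.size != 1:
    transitions[reg_met_mask, :, :] += trans_mat
```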

@@ -100,7 +101,8 @@ def _convert_pft(notdone, exp_target, met_idx, pft_toconv, spat_ludataharm_sub,
     # select the grid cells to expand; user defines whether to use stochastic draw or select the grid cells
     # with the highest likelihood
     if stochastic_expansion == 1:
-        drawcells = stats.binom.rvs(1, expansion_likelihood / np.nanmax(expansion_likelihood))
+        drawcells = stats.binom.rvs(1, expansion_likelihood / np.nanmax(expansion_likelihood),
+                                    size=expansion_likelihood.shape)
     else:
         drawcells = expansion_likelihood >= selection_threshold * np.nanmax(expansion_likelihood)
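With an array of probabilities, passing `size=expansion_likelihood.shape` pins the output to exactly one Bernoulli draw per grid cell instead of relying on scipy's implicit broadcasting. A runnable sketch with made-up likelihoods:

```python
import numpy as np
from scipy import stats

# hypothetical per-cell expansion likelihoods
expansion_likelihood = np.array([0.2, 0.9, 0.5, 1.3])

# normalize so the most likely cell has probability 1.0
p = expansion_likelihood / np.nanmax(expansion_likelihood)

# one Bernoulli trial (n=1) per cell; size fixes the output shape
drawcells = stats.binom.rvs(1, p, size=expansion_likelihood.shape)
print(drawcells)  # e.g. [0 1 1 1]
```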

@@ -128,7 +130,7 @@ def _convert_pft(notdone, exp_target, met_idx, pft_toconv, spat_ludataharm_sub,
     target_change[reg, met_idx, pft] -= np.sum(actexpansion)
     exp_target -= np.sum(actexpansion)
     target_change[reg, met_idx, pft_toconv] += np.sum(actexpansion)
-    trans_mat[exist_cells[candidatecells], pft, pft_toconv] += actexpansion
+    #trans_mat[exist_cells[candidatecells], pft, pft_toconv] += actexpansion

     # account for target change minuscule values when evaluating notdone
     tc = round(target_change[reg, met_idx, pft_toconv], 4)
@@ -156,7 +158,7 @@ def _expansion(diagnostic, diag_file, spat_ludataharm_sub, kernel_vector_sub, co
     l_ord = len(order_rules)

     # initialize transition arrays
-    trans_mat = np.zeros((l_shs, l_ord, l_ord))
+    trans_mat = np.zeros(shape=1)

     # process PFTs in order
     for pft_ord in np.unique(order_rules):
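Replacing the full `(l_shs, l_ord, l_ord)` cube with a 1-element placeholder, together with the commented-out accumulation above, avoids allocating a potentially huge array when per-cell transitions are not being tracked. A back-of-envelope sketch with illustrative sizes:

```python
import numpy as np

# illustrative sizes: number of grid cells x number of PFT classes
l_shs, l_ord = 250_000, 32

full_bytes = l_shs * l_ord * l_ord * 8  # float64 cube: ~2 GB
placeholder = np.zeros(shape=1)         # 8 bytes; signals "tracking off"
print(f"full cube ~{full_bytes / 1e9:.1f} GB vs placeholder {placeholder.nbytes} B")
```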
9 changes: 5 additions & 4 deletions demeter/change/intensification.py
@@ -63,7 +63,8 @@ def intense_parallel_helper(regix_metix, spat_region, order_rules, allregnumber,
     # arr_reshaped = trans_mat.reshape(trans_mat.shape[0], -1)
     # np.savetxt("test.csv", arr_reshaped, delimiter=",")
     # log transition
-    transitions[reg_met_mask, :, :] += trans_mat
+    if transitions.size != 1:
+        transitions[reg_met_mask, :, :] += trans_mat

     # calculate non-achieved change

@@ -75,7 +76,7 @@ def intense_parallel_helper(regix_metix, spat_region, order_rules, allregnumber,
     else:
         non_chg_per = 0

-    # log.info("Total non-achieved intensification change for pass {0} time step {1}: {2} km2 ({3} %)".format(pass_number, yr, non_chg, non_chg_per))
+    log.info("Total non-achieved intensification change for pass {0} time step {1}: {2} km2 ({3} %)".format(pass_number, yr, non_chg, non_chg_per))


 def diff_diagnostic(diag_outdir, d_regid_nm, gcam_landmatrix, spat_landmatrix, reg, yr, yr_idx):
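Re-enabling this line restores per-pass visibility into unmet intensification targets. A self-contained sketch of how the message renders (all values hypothetical):

```python
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demeter")

pass_number, yr = 1, 2015         # hypothetical pass and time step
non_chg, non_chg_per = 42.5, 3.1  # hypothetical unmet change and percent

log.info("Total non-achieved intensification change for pass {0} time step {1}: "
         "{2} km2 ({3} %)".format(pass_number, yr, non_chg, non_chg_per))
# INFO:demeter:Total non-achieved intensification change for pass 1 time step 2015: 42.5 km2 (3.1 %)
```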
@@ -162,7 +163,7 @@ def _convert_pft(notdone, int_target, metnumber, pft_toconv, spat_ludataharm_sub
     target_intensification[metnumber - 1, pft] -= actual_expansion_sum
     target_change[reg, metnumber - 1, pft_toconv] += actual_expansion_sum
     target_intensification[metnumber - 1, pft_toconv] += actual_expansion_sum
-    trans_mat[exist_cells, pft, pft_toconv] += actexpansion
+    #trans_mat[exist_cells, pft, pft_toconv] += actexpansion

     # account for target change minuscule values when evaluating notdone
     tc = round(target_change[reg, metnumber - 1, pft_toconv], 4)
@@ -193,7 +194,7 @@ def _intensification(diagnostic, diag_file, spat_ludataharm_sub, target_intensif
     l_ord = len(order_rules)

     # initialize transition arrays
-    trans_mat = np.zeros((l_shs, l_ord, l_ord))
+    trans_mat = np.zeros(shape=1)

     # process PFTs in order
     for pft_ord in np.unique(order_rules):
33 changes: 29 additions & 4 deletions demeter/demeter_io/reader.py
@@ -15,6 +15,7 @@
 import numpy as np
 import pandas as pd
 import gcamreader
+import xarray as xr


 class ValidationException(Exception):
@@ -390,8 +391,29 @@ def read_base(config, observed_landclasses, sequence_metric_dict, metric_seq, re
     :param region_seq: An ordered list of expected region ids

     """
+
+    xr_data = xr.open_dataset(config.observed_lu_file)
+    xr_df = xr_data.to_dataframe().reset_index().dropna()
+    # print(xr_df.head())
+    # print(max(xr_df["region_id"]))
+
-    df = pd.read_csv(config.observed_lu_file, compression='infer')
-
+    name_map = {
+        var: xr_data[var].attrs.get("long_name", var)  # fall back to var name if no long_name
+        for var in xr_data.data_vars}
+
+    xr_df = xr_df.rename(columns=name_map)
+
+
+    colnames_for_rename = xr_df.drop(["region_id", "basin_id", "latitude", "longitude"], axis=1).columns
+
+    area = 0.00151872768 * 0.00151872768
+
+    xr_df[colnames_for_rename] = xr_df[colnames_for_rename].multiply(area, axis="index")
+    xr_df = xr_df.dropna()
+
+    xr_df["fid"] = range(1, len(xr_df) + 1)
+    df = xr_df
+    # df = pd.read_csv(config.observed_lu_file, compression='infer')

     # rename columns as lower case
     df.columns = [i.lower() for i in df.columns]
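The CSV read is replaced by an xarray pipeline: open the NetCDF, flatten it to a dataframe, then rename data variables to their `long_name` attributes. A self-contained sketch of the rename pattern, using a synthetic dataset (variable names and attributes are illustrative only):

```python
import numpy as np
import xarray as xr

# synthetic stand-in for the contents of config.observed_lu_file
ds = xr.Dataset(
    {"lc_1": (("latitude", "longitude"), np.random.rand(2, 2))},
    coords={"latitude": [10.0, 10.05], "longitude": [-95.0, -94.95]},
)
ds["lc_1"].attrs["long_name"] = "water"

df = ds.to_dataframe().reset_index().dropna()

# map each data variable to its long_name, falling back to the variable name
name_map = {v: ds[v].attrs.get("long_name", v) for v in ds.data_vars}
df = df.rename(columns=name_map)

print(df.columns.tolist())  # ['latitude', 'longitude', 'water']
```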
@@ -460,16 +482,19 @@
     spat_region[spat_region == 30] = 11

     # cell area from lat: lat_correction_factor * (lat_km at equator * lon_km at equator) * (resolution squared) = sqkm
-    cellarea = np.cos(np.radians(spat_coords[:, 0])) * (111.32 * 110.57) * (config.spatial_resolution**2)
-
+    cellarea = np.cos(np.radians(spat_coords[:, 0])) * (111.32 * 110.57) * (config.spatial_resolution**2) * 1000000
+    # import pandas as pd
+    cel = pd.DataFrame(cellarea)
+    cel.to_csv("cell_area.csv")
+    # cellarea = 2.30653376599818E-06

     # create an array with the actual percentage of the grid cell included in the data; some are cut by AEZ or Basin
     # polygons others have no-data in land cover
     celltrunk = (np.sum(spat_ludata, axis=1) + spat_water) / (config.spatial_resolution ** 2)

     # adjust land cover area based on the percentage of the grid cell represented
     spat_ludata = spat_ludata / (config.spatial_resolution ** 2) * np.transpose([cellarea, ] * len(observed_landclasses))

     return [spat_ludata, spat_water, spat_coords, spat_metric_region, spat_grid_id, spat_metric, spat_region, ngrids,
             cellarea, celltrunk, sequence_metric_dict]

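The added `* 1000000` converts the latitude-corrected cell area from km2 to m2 (the "sqkm" in the preceding comment predates this factor). A runnable sketch of the formula with illustrative inputs:

```python
import numpy as np

spatial_resolution = 0.05          # illustrative grid resolution in degrees
lat = np.array([0.0, 30.0, 60.0])  # cell-center latitudes

# ~110.57 km per degree latitude and ~111.32 km per degree longitude at the
# equator; cos(lat) shrinks the east-west extent away from the equator
cellarea_km2 = np.cos(np.radians(lat)) * (111.32 * 110.57) * spatial_resolution**2
cellarea_m2 = cellarea_km2 * 1000000

print(cellarea_m2)  # decreases toward the poles
```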
3 changes: 2 additions & 1 deletion demeter/demeter_io/writer.py
@@ -48,6 +48,7 @@ def lc_timestep_csv(c, yr, final_landclasses, spat_coords, metric_id_array, gcam
     hdr = "latitude,longitude,{0}_id,region_id,water,{1}".format(metric.lower(), ','.join(final_landclasses))

     # format data
+    # print(spat_coords)
     arr = np.hstack((
         # latitude, longitude
         spat_coords,
@@ -166,7 +167,7 @@
     t_joined = t_joined.reset_index(drop=True)

     x = nc.DemeterToNetcdf(scenario_name=str(sce),
-                           project_name="",
+                           project_name="TexasBasin",
                            start_year=2005,
                            end_year=2005,
                            resolution=resolution,