Commit

Merge pull request #169 from OMS-NetZero/purge_logger

remove logger warnings and debugs
chrisroadmap authored Nov 23, 2024
2 parents 56b1a95 + 596b9dd commit bb2efd1
Showing 5 changed files with 28 additions and 118 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.rst
@@ -1,6 +1,11 @@
Changelog
---------

v2.2.2
------

(`#169 <https://github.com/OMS-NetZero/FAIR/pull/169>`_) Removed logging messages which were too annoying and slowed down overrides in all but default cases (`#162 <https://github.com/OMS-NetZero/FAIR/issues/162>`_)

v2.2.1
------

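For users on releases prior to v2.2.2 who want the same quiet behaviour, the removed messages could also have been silenced through the standard logging module. A minimal sketch, assuming the module loggers live under the fair.* namespace (they are created with logging.getLogger(__name__) in src/fair/io/fill_from.py and src/fair/io/param_sets.py):

import logging

# Hypothetical pre-2.2.2 usage: raise the level on fair's io module loggers so
# the warning/debug messages removed in this PR are not emitted.
logging.getLogger("fair.io.fill_from").setLevel(logging.ERROR)
logging.getLogger("fair.io.param_sets").setLevel(logging.ERROR)
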
43 changes: 2 additions & 41 deletions src/fair/io/fill_from.py
@@ -1,7 +1,6 @@
"""Methods for filling data from scenario files."""

import copy
import logging

import numpy as np
import pandas as pd
@@ -26,8 +25,6 @@
time_convert,
)

logger = logging.getLogger(__name__)


def _check_csv(df, runmode):
# check our three metadata columns are present
@@ -70,23 +67,11 @@ def _check_csv(df, runmode):
return times


def _bounds_warning(firstlast, runmode, filetime, problemtime):
# Don't raise error if time is out of range because we can still fill in emissions
# by directly modifying attributes, but user might have made a mistake so warn
earlierlater = {"first": "later", "last": "earlier"}
return logger.warning(
f"The {firstlast} time in the {runmode} file ({filetime}) is "
f"{earlierlater[firstlast]} than the {firstlast} time in the problem "
f"definition ({problemtime})."
)


def _parse_unit(unit, specie, is_ghg):
try:
prefix = unit.split()[0]
compound = unit.split()[1].split("/")[0]
time = unit.split()[1].split("/")[1]
logger.debug(f"prefix={prefix}, compound={compound}, time={time}")
except IndexError:
raise UnitParseError(
"Units must be given in the format MASS SPECIE/TIME (with a whitespace "
@@ -108,12 +93,6 @@ def _parse_unit(unit, specie, is_ghg):
# compound may be novel if it is user defined, but we can't convert it if so. In
# which case add to our desired unit lists to prevent later errors.
if compound not in compound_convert:
logger.warning(
f"{compound} is not in fair's default list of species masses for "
f"{specie}, so I can't convert it. For my non-native species, greenhouse "
"gas emissions are reported in kt/yr, short-lived forcer emissions in "
"Mt/yr, greenhouse gas concentrations in ppt, and forcings in W/m2."
)
if is_ghg:
desired_emissions_units[specie] = f"kt {compound}/yr"
desired_concentration_units[specie] = "ppt"
@@ -145,11 +124,6 @@ def _concentration_unit_convert(concentration, unit, specie):
f"values, which are {list(mixing_ratio_convert.keys())}."
)
if specie not in desired_concentration_units:
logger.warning(
f"{specie} is not in the default list of greenhouse gases known to fair, "
"so I'm going to convert concentrations to ppt and report back-calculated "
"emissions in kt/yr."
)
desired_concentration_units[specie] = "ppt"
concentration = concentration * (
mixing_ratio_convert[unit][desired_concentration_units[specie]]
@@ -201,10 +175,6 @@ def fill_from_csv(
df = pd.read_csv(mode_options[mode]["file"])
df.columns = df.columns.str.lower()
times = _check_csv(df, runmode=mode) # list of strings
if float(times[0]) > mode_options[mode]["time"][0]:
_bounds_warning("first", mode, times[0], mode_options[mode]["time"][0])
if float(times[-1]) < self.timepoints[-1]:
_bounds_warning("last", mode, times[-1], mode_options[mode]["time"][-1])
times_array = np.array(times, dtype=float)

for scenario in self.scenarios:
@@ -218,17 +188,8 @@
times[0] : times[-1],
].values

# warn if data missing; it might be an error by the user, but
# it's not fatal; we can fill in later
if data_in.shape[0] == 0:
logger.warning(
f"I can't find a value for scenario='{scenario}', "
f"variable='{specie}', region='World' in "
f"{mode_options[mode]['file']} file."
)
continue
# duplicates are ambiguous however, and are an error
elif data_in.shape[0] > 1:
# duplicates are ambiguous and are an error
if data_in.shape[0] > 1:
raise DuplicateScenarioError(
f"In {mode_options[mode]['file']} there are duplicate "
f"rows for variable='{specie}, scenario='{scenario}'."
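As a reference for the unit convention that _parse_unit's error message describes ("MASS SPECIE/TIME" with a whitespace between mass and specie), here is a minimal sketch using the same split logic shown in the hunk above; the example string is hypothetical:

# Minimal sketch of the "MASS SPECIE/TIME" unit convention, e.g. "Mt CO2/yr".
unit = "Mt CO2/yr"
prefix = unit.split()[0]                  # mass prefix, "Mt"
compound = unit.split()[1].split("/")[0]  # specie, "CO2"
time = unit.split()[1].split("/")[1]      # time unit, "yr"
print(prefix, compound, time)             # Mt CO2 yr
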
23 changes: 0 additions & 23 deletions src/fair/io/param_sets.py
@@ -1,14 +1,9 @@
"""Methods for filling species and climate config parameters in."""

import logging

import pandas as pd

from ..interface import fill

logger = logging.getLogger(__name__)


energy_balance_parameters = [
"gamma_autocorrelation",
"ocean_heat_capacity",
@@ -47,17 +42,7 @@ def override_defaults(self, filename):
"""
df_configs = pd.read_csv(filename, index_col=0)
for config in self.configs:
logger.debug("Checking for missing config label")
# warn if config is not present in file; it might be an error by the user, but
# it's not fatal; it can still be filled in before calling run()
if config not in df_configs.index:
logger.warning(
f"I can't find a config with label '{config}' in the supplied file "
f"{filename}."
)
continue
for col in df_configs.columns:
logger.debug("Checking whether this is an array")
# should really do some error checking here; hopefully python's default
# errors will be obvious enough if a user made a mistake
if len(col.split("[")) > 1:
@@ -68,10 +53,8 @@
param_index = None

if param_name in energy_balance_parameters:
logger.debug(f"Found climate_config parameter {param_name}")
# error checking required?
if param_index is not None:
logger.debug(f"Filling layer {param_index}")
fill(
self.climate_configs[param_name],
df_configs.loc[config, col],
@@ -86,16 +69,10 @@
)

else:
logger.debug(f"Found species_config parameter {param_name}")
# error checking required?
if param_index is not None:
if param_index not in self.species:
logger.warning(
f"{param_index} is not a specie defined in this `fair` "
f"instance for column name {col} in {filename}."
)
continue
logger.debug(f"Filling specie {param_index}")
fill(
self.species_configs[param_name],
df_configs.loc[config, col],
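override_defaults distinguishes scalar columns from array-valued ones by splitting the CSV header on "[", as the hunk above shows. A minimal sketch of that convention with a hypothetical header such as "ocean_heat_capacity[0]"; the exact index handling (integer layers for climate configs, specie names for species configs) falls in the elided lines, so it is an assumption here:

col = "ocean_heat_capacity[0]"  # hypothetical column header with a layer index

if len(col.split("[")) > 1:
    # assumed index parsing; the real code may convert layer indices to int
    param_name = col.split("[")[0]
    param_index = col.split("[")[1].rstrip("]")
else:
    param_name = col
    param_index = None

print(param_name, param_index)  # ocean_heat_capacity 0
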
62 changes: 15 additions & 47 deletions tests/unit_tests/io/fill_from_test.py
@@ -1,6 +1,5 @@
"""Module for testing fill_from functions."""

import logging
import os

import numpy as np
@@ -91,20 +90,6 @@ def test__check_csv():
)


def test__parse_unit(caplog):
caplog.set_level(logging.DEBUG)
f = minimal_problem_def()

with pytest.raises(UnitParseError):
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "bad-unit.csv"))

with pytest.raises(UnitParseError):
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "bad-prefix.csv"))

with pytest.raises(UnitParseError):
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "bad-time.csv"))


def test__non_default_specie(caplog):
f = FAIR()
species = ["HFC-152", "Hydrogen"]
@@ -127,12 +112,22 @@ def test__non_default_specie(caplog):
f.define_configs(["UKESM1-0-LL"])
f.allocate()
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "new-specie.csv"))
assert "HFC152 is not in fair's default list" in caplog.text
assert "H2 is not in fair's default list" in caplog.text


def test__concentration_unit_convert(caplog):
caplog.set_level(logging.DEBUG)
def test__parse_unit():
f = minimal_problem_def()

with pytest.raises(UnitParseError):
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "bad-unit.csv"))

with pytest.raises(UnitParseError):
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "bad-prefix.csv"))

with pytest.raises(UnitParseError):
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "bad-time.csv"))


def test__concentration_unit_convert():
f = FAIR()
species = ["CO2"]
properties = {}
@@ -169,30 +164,10 @@ def test__concentration_unit_convert(caplog):
f.fill_from_csv(
concentration_file=os.path.join(TEST_DATA_PATH, "new-concentration-specie.csv")
)
assert "PF3 is not in the default list of greenhouse gases " in caplog.text


def test__bounds_warning(caplog):
caplog.set_level(logging.DEBUG)
f = minimal_problem_def()
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "bounds-early.csv"))
assert ("The first time in the emissions file " in caplog.text) & (
"is later than the first time in the problem definition" in caplog.text
)


# this one unfinished
def test_fill_from_csv(caplog):
caplog.set_level(logging.DEBUG)
f = minimal_problem_def()
f.fill_from_csv(
emissions_file=os.path.join(TEST_DATA_PATH, "minimal-emissions.csv")
)
assert (
"The last time in the emissions file (1752) is earlier than the last time "
"in the problem definition (1752.5)" in caplog.text
)

def test_fill_from_csv():
f = minimal_problem_def(input_mode="concentration")
f.fill_from_csv(
concentration_file=os.path.join(TEST_DATA_PATH, "minimal-concentration.csv")
@@ -201,13 +176,6 @@ def test_fill_from_csv(caplog):
f = minimal_problem_def(input_mode="forcing", species=["Solar", "Volcanic"])
f.fill_from_csv(forcing_file=os.path.join(TEST_DATA_PATH, "minimal-forcing.csv"))

f = minimal_problem_def()
f.fill_from_csv(emissions_file=os.path.join(TEST_DATA_PATH, "new-specie.csv"))
assert (
"I can't find a value for scenario='test', variable='CO2', region='World' in"
in caplog.text
)

f = minimal_problem_def()
f.fill_from_csv(
emissions_file=os.path.join(TEST_DATA_PATH, "minimal-emissions.csv")
13 changes: 6 additions & 7 deletions tests/unit_tests/io/param_sets_test.py
@@ -1,8 +1,9 @@
"""Module for testing fill_from functions."""

import logging
import os

import pytest

from fair import FAIR
from fair.io import read_properties

@@ -33,21 +34,19 @@ def minimal_problem_def(configs=["one", "two", "three"]):
return fair_obj


def test_override_defaults(caplog):
caplog.set_level(logging.DEBUG)
def test_override_defaults():
f = minimal_problem_def()
f.fill_from_csv(
emissions_file=os.path.join(EMISSIONS_PATH, "minimal-emissions.csv")
)
f.fill_species_configs()
f.override_defaults(PARAMS_FILE)

# "four" is not in the configs file, so should raise an error
f = minimal_problem_def(configs=["four"])
f.fill_from_csv(
emissions_file=os.path.join(EMISSIONS_PATH, "minimal-emissions.csv")
)
f.fill_species_configs()
f.override_defaults(PARAMS_FILE)
assert (
"I can't find a config with label 'four' in the supplied file " in caplog.text
)
with pytest.raises(KeyError):
f.override_defaults(PARAMS_FILE)

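With the warn-and-skip branch removed from override_defaults, the updated test above expects a missing config label to surface as a KeyError, presumably raised by pandas when df_configs.loc[config, col] is indexed with a label that is not in the file. A standalone sketch of that pandas behaviour; the frame and labels are hypothetical:

import pandas as pd

df_configs = pd.DataFrame(
    {"ocean_heat_capacity[0]": [2.9, 3.1, 3.0]},
    index=["one", "two", "three"],  # hypothetical config labels
)

try:
    df_configs.loc["four", "ocean_heat_capacity[0]"]  # "four" is not in the index
except KeyError as err:
    print("missing config label raised KeyError:", err)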