
Commit dd8449f

lostella and Jasper authored
Backports v0.13.8 (#3054)
* Refactor tests for `ev.aggregations` (#3038)
* Fix edge cases in metric computation (#3037)
* Rotbaum: Add item-id to forecast. (#3049)
* Fix mypy checks (#3052)
* add init

Co-authored-by: Jasper <[email protected]>
1 parent ca40b46 commit dd8449f

8 files changed: +295 -67 lines changed


src/gluonts/ev/aggregations.py

Lines changed: 11 additions & 5 deletions
@@ -48,7 +48,9 @@ class Sum(Aggregation):
     partial_result: Optional[Union[List[np.ndarray], np.ndarray]] = None
 
     def step(self, values: np.ndarray) -> None:
-        summed_values = np.ma.sum(values, axis=self.axis)
+        assert self.axis is None or isinstance(self.axis, tuple)
+
+        summed_values = np.nansum(values, axis=self.axis)
 
         if self.axis is None or 0 in self.axis:
             if self.partial_result is None:
@@ -61,9 +63,11 @@ def step(self, values: np.ndarray) -> None:
 
     def get(self) -> np.ndarray:
         if self.axis is None or 0 in self.axis:
-            return np.ma.copy(self.partial_result)
+            assert isinstance(self.partial_result, np.ndarray)
+            return np.copy(self.partial_result)
 
-        return np.ma.concatenate(self.partial_result)
+        assert isinstance(self.partial_result, list)
+        return np.concatenate(self.partial_result)
 
 
 @dataclass
@@ -100,11 +104,13 @@ def step(self, values: np.ndarray) -> None:
         if self.partial_result is None:
             self.partial_result = []
 
-        mean_values = np.ma.mean(values, axis=self.axis)
+        mean_values = np.nanmean(values, axis=self.axis)
+        assert isinstance(self.partial_result, list)
         self.partial_result.append(mean_values)
 
     def get(self) -> np.ndarray:
         if self.axis is None or 0 in self.axis:
             return self.partial_result / self.n
 
-        return np.ma.concatenate(self.partial_result)
+        assert isinstance(self.partial_result, list)
+        return np.concatenate(self.partial_result)
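For context, a plain-NumPy sketch (not part of the commit) contrasting the old masked-array reductions with the nan-aware ones used above; an all-NaN slice is presumably the kind of edge case targeted by "Fix edge cases in metric computation (#3037)".

import numpy as np

all_nan = np.full(3, np.nan)
masked = np.ma.masked_invalid(all_nan)

print(np.ma.sum(masked))     # the `masked` constant (old code path)
print(np.ma.mean(masked))    # `masked` as well
print(np.nansum(all_nan))    # 0.0 -- NaNs treated as missing, as in the new Sum.step
print(np.nanmean(all_nan))   # nan, with a RuntimeWarning, as in the new Mean.step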

src/gluonts/ev/metrics.py

Lines changed: 3 additions & 3 deletions
@@ -284,7 +284,7 @@ def mean(**quantile_losses: np.ndarray) -> np.ndarray:
             [quantile_loss for quantile_loss in quantile_losses.values()],
             axis=0,
         )
-        return np.ma.mean(stacked_quantile_losses, axis=0)
+        return np.mean(stacked_quantile_losses, axis=0)
 
     def __call__(self, axis: Optional[int] = None) -> DerivedEvaluator:
         return DerivedEvaluator(
@@ -307,7 +307,7 @@ def mean(**quantile_losses: np.ndarray) -> np.ndarray:
             [quantile_loss for quantile_loss in quantile_losses.values()],
             axis=0,
         )
-        return np.ma.mean(stacked_quantile_losses, axis=0)
+        return np.mean(stacked_quantile_losses, axis=0)
 
     def __call__(self, axis: Optional[int] = None) -> DerivedEvaluator:
         return DerivedEvaluator(
@@ -332,7 +332,7 @@ def mean(
             [np.abs(coverages[f"coverage[{q}]"] - q) for q in quantile_levels],
             axis=0,
         )
-        return np.ma.mean(intermediate_result, axis=0)
+        return np.mean(intermediate_result, axis=0)
 
     def __call__(self, axis: Optional[int] = None) -> DerivedEvaluator:
         return DerivedEvaluator(
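A small NumPy sketch of the averaging pattern above (loss arrays and dict keys invented for illustration): per-quantile loss arrays are stacked along a new leading axis and averaged across quantile levels, with no masked arrays involved.

import numpy as np

quantile_losses = {
    "0.1": np.array([1.0, 2.0, 3.0]),
    "0.5": np.array([2.0, 2.0, 2.0]),
    "0.9": np.array([3.0, 4.0, 1.0]),
}
stacked_quantile_losses = np.stack(
    [quantile_loss for quantile_loss in quantile_losses.values()],
    axis=0,
)
print(np.mean(stacked_quantile_losses, axis=0))  # [2.0, 2.666..., 2.0]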

src/gluonts/ext/rotbaum/_predictor.py

Lines changed: 3 additions & 1 deletion
@@ -50,12 +50,13 @@ def __init__(
         featurized_data: List,
         start_date: pd.Period,
         prediction_length: int,
+        item_id: Optional[str] = None,
     ):
         self.models = models
         self.featurized_data = featurized_data
         self.start_date = start_date
         self.prediction_length = prediction_length
-        self.item_id = None
+        self.item_id = item_id
         self.lead_time = None
 
     def quantile(self, q: float) -> np.ndarray:
@@ -333,6 +334,7 @@ def predict(
                 [featurized_data],
                 start_date=forecast_start(ts),
                 prediction_length=self.prediction_length,
+                item_id=ts.get("item_id"),
             )
 
     def explain(
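A minimal sketch (entry dicts invented here) of why `ts.get("item_id")` together with the new `item_id=None` default stays backward compatible: entries without an `item_id` field still produce forecasts with `item_id` set to None, exactly as before.

entry_with_id = {"target": [1.0, 2.0, 3.0], "item_id": "series_7"}
entry_without_id = {"target": [1.0, 2.0, 3.0]}

print(entry_with_id.get("item_id"))     # 'series_7' -> now attached to the forecast
print(entry_without_id.get("item_id"))  # None -> same as the old hard-coded value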

src/gluonts/model/evaluation.py

Lines changed: 1 addition & 1 deletion
@@ -203,7 +203,7 @@ def evaluate_forecasts(
     )
     if index0 is not None:
         index0_repeated = np.take(index0, indices=index_arrays[0], axis=0)
-        index_arrays = (*zip(*index0_repeated), *index_arrays[1:])
+        index_arrays = (*zip(*index0_repeated), *index_arrays[1:])  # type: ignore
     index = pd.MultiIndex.from_arrays(index_arrays)
 
     flattened_metrics = valmap(np.ravel, metrics_values)
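For context, a runnable sketch of the index construction that the `# type: ignore` silences, on made-up data: `index0` stands in for per-item index columns and `index_arrays` for the positional index arrays built earlier in `evaluate_forecasts`.

import numpy as np
import pandas as pd

index0 = np.array([["item_0", "2021-01"], ["item_1", "2021-02"]])
index_arrays = (np.array([0, 0, 1, 1]), np.array([1, 2, 1, 2]))

index0_repeated = np.take(index0, indices=index_arrays[0], axis=0)
index_arrays = (*zip(*index0_repeated), *index_arrays[1:])  # the reassignment mypy flags in the real code
print(pd.MultiIndex.from_arrays(index_arrays))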

src/gluonts/mx/model/deepstate/issm.py

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@
 # express or implied. See the License for the specific language governing
 # permissions and limitations under the License.
 
-from typing import List, Tuple
+from typing import List, Sequence, Tuple
 
 from pandas.tseries.frequencies import to_offset
 
@@ -31,7 +31,7 @@
 )
 
 
-def _make_block_diagonal(blocks: List[Tensor]) -> Tensor:
+def _make_block_diagonal(blocks: Sequence[Tensor]) -> Tensor:
     assert (
         len(blocks) > 0
     ), "You need at least one tensor to make a block-diagonal tensor"

test/ev/test_aggregations.py

Lines changed: 90 additions & 55 deletions
@@ -18,84 +18,119 @@
 from gluonts.ev import Mean, Sum
 from gluonts.itertools import power_set
 
-VALUE_STREAM = [
-    [
-        np.full((3, 5), np.nan),
-        np.full((3, 5), np.nan),
-        np.full((3, 5), np.nan),
-    ],
-    [
-        np.array([[0, np.nan], [0, 0]]),
-        np.array([[0, 5], [-5, np.nan]]),
-    ],
-    [
-        np.full(shape=(3, 3), fill_value=1),
-        np.full(shape=(1, 3), fill_value=4),
-    ],
-]
-
-SUM_RES_AXIS_NONE = [
-    0,
-    0,
-    21,
-]
-
-SUM_RES_AXIS_0 = [
-    np.zeros(5),
-    np.array([-5, 5]),
-    np.array([7, 7, 7]),
-]
-SUM_RES_AXIS_1 = [
-    np.zeros(9),
-    np.array([0, 0, 5, -5]),
-    np.array([3, 3, 3, 12]),
-]
-
-
-MEAN_RES_AXIS_NONE = [
-    np.nan,
-    0,
-    1.75,
-]
-
-MEAN_RES_AXIS_0 = [
-    np.full(5, np.nan),
-    np.array([-1.25, 2.5]),
-    np.array([1.75, 1.75, 1.75]),
-]
-MEAN_RES_AXIS_1 = [
-    np.full(9, np.nan),
-    np.array([0, 0, 2.5, -5]),
-    np.array([1, 1, 1, 4]),
-]
-
 
 @pytest.mark.parametrize(
     "value_stream, res_axis_none, res_axis_0, res_axis_1",
-    zip(VALUE_STREAM, SUM_RES_AXIS_NONE, SUM_RES_AXIS_0, SUM_RES_AXIS_1),
+    [
+        (
+            [
+                np.full((3, 5), 0.0),
+                np.full((3, 5), 0.0),
+                np.full((3, 5), 0.0),
+            ],
+            0.0,
+            np.zeros(5),
+            np.zeros(9),
+        ),
+        (
+            np.ma.masked_invalid(
+                [
+                    np.full((3, 5), np.nan),
+                    np.full((3, 5), np.nan),
+                    np.full((3, 5), np.nan),
+                ]
+            ),
+            0,
+            np.zeros(5),
+            np.zeros(9),
+        ),
+        (
+            np.ma.masked_invalid(
+                [
+                    np.array([[0, np.nan], [0, 0]]),
+                    np.array([[0, 5], [-5, np.nan]]),
+                ]
+            ),
+            0,
+            np.array([-5, 5]),
+            np.array([0, 0, 5, -5]),
+        ),
+        (
+            [
+                np.full(shape=(3, 3), fill_value=1),
+                np.full(shape=(1, 3), fill_value=4),
+            ],
+            21,
+            np.array([7, 7, 7]),
+            np.array([3, 3, 3, 12]),
+        ),
+    ],
 )
 def test_Sum(value_stream, res_axis_none, res_axis_0, res_axis_1):
     for axis, expected_result in zip(
         [None, 0, 1], [res_axis_none, res_axis_0, res_axis_1]
     ):
         sum = Sum(axis=axis)
         for values in value_stream:
-            sum.step(np.ma.masked_invalid(values))
+            sum.step(values)
 
         np.testing.assert_almost_equal(sum.get(), expected_result)
 
 
 @pytest.mark.parametrize(
     "value_stream, res_axis_none, res_axis_0, res_axis_1",
-    zip(VALUE_STREAM, MEAN_RES_AXIS_NONE, MEAN_RES_AXIS_0, MEAN_RES_AXIS_1),
+    [
+        (
+            [
+                np.full((3, 5), 0.0),
+                np.full((3, 5), 0.0),
+                np.full((3, 5), 0.0),
+            ],
+            0.0,
+            np.zeros(5),
+            np.zeros(9),
+        ),
+        (
+            np.ma.masked_invalid(
+                [
+                    np.full((3, 5), np.nan),
+                    np.full((3, 5), np.nan),
+                    np.full((3, 5), np.nan),
+                ]
+            ),
+            np.nan,
+            np.full(5, np.nan),
+            np.full(9, np.nan),
+        ),
+        (
+            np.ma.masked_invalid(
+                [
+                    np.array([[0, np.nan], [0, 0]]),
+                    np.array([[0, 5], [-5, np.nan]]),
+                ]
+            ),
+            0,
+            np.array([-1.25, 2.5]),
+            np.array([0, 0, 2.5, -5]),
+        ),
+        (
+            [
+                np.full(shape=(3, 3), fill_value=1),
+                np.full(shape=(1, 3), fill_value=4),
+            ],
+            1.75,
+            np.array([1.75, 1.75, 1.75]),
+            np.array([1, 1, 1, 4]),
+        ),
+    ],
 )
 def test_Mean(value_stream, res_axis_none, res_axis_0, res_axis_1):
     for axis, expected_result in zip(
         [None, 0, 1], [res_axis_none, res_axis_0, res_axis_1]
     ):
         mean = Mean(axis=axis)
         for values in value_stream:
-            mean.step(np.ma.masked_invalid(values))
+            mean.step(values)
 
         np.testing.assert_almost_equal(mean.get(), expected_result)