From 6af51d76455f44dc3dbf453d508d4497bfb8c46c Mon Sep 17 00:00:00 2001 From: Kevin Sheppard Date: Mon, 21 Oct 2024 13:45:50 +0100 Subject: [PATCH] CLN: Switch to executed types Switch to executed types to improve docs Fix type aliasing Remove unnecessary aliases Improve xref --- arch/bootstrap/_samplers_python.py | 2 - arch/bootstrap/base.py | 139 +++++------ arch/bootstrap/multiple_comparison.py | 24 +- arch/compat/statsmodels.py | 6 +- arch/covariance/kernel.py | 30 +-- arch/tests/bootstrap/test_bootstrap.py | 6 +- arch/tests/unitroot/test_unitroot.py | 10 +- arch/tests/univariate/test_arch_in_mean.py | 2 +- arch/tests/univariate/test_mean.py | 6 +- arch/tests/univariate/test_recursions.py | 6 +- arch/tests/utility/test_cov.py | 2 - arch/typing.py | 26 +- arch/unitroot/_engle_granger.py | 14 +- arch/unitroot/_phillips_ouliaris.py | 22 +- arch/unitroot/_shared.py | 12 +- arch/unitroot/cointegration.py | 86 +++---- arch/unitroot/critical_values/dfgls.py | 2 - .../unitroot/critical_values/dickey_fuller.py | 2 - .../unitroot/critical_values/engle_granger.py | 2 - arch/unitroot/critical_values/kpss.py | 2 - .../critical_values/phillips_ouliaris.py | 2 - .../simulation/adf_simulation.py | 5 +- ...adf_z_critical_values_simulation_joblib.py | 2 - ...ritical_values_simulation_large_cluster.py | 2 - .../dfgls_critical_values_simulation.py | 8 +- .../phillips-ouliaris-simulation-process.py | 6 +- .../critical_values/simulation/shared.py | 6 +- .../unitroot/critical_values/zivot_andrews.py | 2 - arch/unitroot/unitroot.py | 163 ++++++------ arch/univariate/__init__.py | 2 - arch/univariate/base.py | 173 ++++++------- arch/univariate/distribution.py | 152 ++++++------ arch/univariate/mean.py | 218 ++++++++-------- arch/univariate/recursions.pyi | 6 +- arch/univariate/recursions.pyx | 83 ++++--- arch/univariate/recursions_python.py | 24 +- arch/univariate/volatility.py | 232 ++++++++++-------- arch/utility/__init__.py | 8 +- arch/utility/array.py | 46 ++-- arch/utility/cov.py | 4 +- arch/utility/io.py | 2 - arch/utility/testing.py | 16 +- arch/utility/timeseries.py | 12 +- doc/source/bootstrap/iid-bootstraps.rst | 2 +- doc/source/changes.rst | 3 +- doc/source/changes/4.0.rst | 4 +- doc/source/changes/5.0.rst | 6 +- doc/source/changes/7.0.rst | 5 + doc/source/conf.py | 37 ++- doc/source/covariance/covariance.rst | 10 + doc/source/index.rst | 6 +- .../multiple-comparison-reference.rst | 6 +- doc/source/types.rst | 33 +++ doc/source/univariate/forecasting.rst | 7 + doc/source/univariate/volatility.rst | 4 +- examples/unitroot_examples.ipynb | 1 + examples/univariate_volatility_modeling.ipynb | 1 - 57 files changed, 864 insertions(+), 836 deletions(-) create mode 100644 doc/source/types.rst diff --git a/arch/bootstrap/_samplers_python.py b/arch/bootstrap/_samplers_python.py index a241951cf0..1597763ae1 100644 --- a/arch/bootstrap/_samplers_python.py +++ b/arch/bootstrap/_samplers_python.py @@ -1,5 +1,3 @@ -from __future__ import annotations - from arch.compat.numba import jit from arch.typing import Float64Array, Int64Array diff --git a/arch/bootstrap/base.py b/arch/bootstrap/base.py index 52af31600b..5fde14c86d 100644 --- a/arch/bootstrap/base.py +++ b/arch/bootstrap/base.py @@ -1,7 +1,5 @@ -from __future__ import annotations - from collections.abc import Generator as PyGenerator, Mapping, Sequence -from typing import Any, Callable, Union, cast +from typing import Any, Callable, Optional, Union, cast import warnings import numpy as np @@ -46,8 +44,8 @@ def _get_prng_state( - prng: Generator | RandomState, 
-) -> RandomStateState | Mapping[str, Any]: + prng: Union[Generator, RandomState], +) -> Union[RandomStateState, Mapping[str, Any]]: if isinstance(prng, Generator): return prng.bit_generator.state else: @@ -56,7 +54,7 @@ def _get_prng_state( def _get_random_integers( - prng: Generator | RandomState, upper: int, *, size: int = 1 + prng: Union[Generator, RandomState], upper: int, *, size: int = 1 ) -> Int64Array: if isinstance(prng, Generator): return prng.integers(upper, size=size, dtype=np.int64) @@ -90,7 +88,7 @@ def _single_optimal_block(x: Float64Array) -> tuple[float, float]: cv = 2 * np.sqrt(np.log10(nobs) / nobs) acv = np.zeros(m_max + 1) abs_acorr = np.zeros(m_max + 1) - opt_m: int | None = None + opt_m: Optional[int] = None for i in range(m_max + 1): v1 = eps[i + 1 :] @ eps[i + 1 :] v2 = eps[: -(i + 1)] @ eps[: -(i + 1)] @@ -118,7 +116,7 @@ def _single_optimal_block(x: Float64Array) -> tuple[float, float]: return b_sb, b_cb -def optimal_block_length(x: ArrayLike1D | ArrayLike2D) -> pd.DataFrame: +def optimal_block_length(x: Union[ArrayLike1D, ArrayLike2D]) -> pd.DataFrame: r""" Estimate optimal window length for time-series bootstraps @@ -230,7 +228,7 @@ def _loo_jackknife( nobs: int, args: Sequence[ArrayLike], kwargs: dict[str, ArrayLike], - extra_kwargs: dict[str, ArrayLike] | None = None, + extra_kwargs: Optional[dict[str, ArrayLike]] = None, ) -> Float64Array: """ Leave one out jackknife estimation @@ -274,7 +272,7 @@ def _loo_jackknife( def _add_extra_kwargs( - kwargs: dict[str, Any], extra_kwargs: dict[str, Any] | None = None + kwargs: dict[str, Any], extra_kwargs: Optional[dict[str, Any]] = None ) -> dict[str, Any]: """ Safely add additional keyword arguments to an existing dictionary @@ -389,13 +387,13 @@ class IIDBootstrap(metaclass=DocStringInheritor): def __init__( self, *args: ArrayLike, - random_state: RandomState | None = None, - seed: None | int | Generator | RandomState = None, + random_state: Optional[RandomState] = None, + seed: Union[int, Generator, RandomState, None] = None, **kwargs: ArrayLike, ) -> None: self._args = list(args) self._kwargs = kwargs - self._generator: RandomState | Generator + self._generator: Union[Generator, RandomState] if random_state is not None: if not isinstance(random_state, RandomState): raise TypeError("random_state must be a RandomState when set.") @@ -408,7 +406,7 @@ def __init__( "RandomState instance using the ``generator`` keyword argument.", FutureWarning, ) - _seed: None | int | RandomState | Generator = random_state + _seed: Union[int, RandomState, Generator, None] = random_state else: _seed = seed @@ -443,17 +441,17 @@ def __init__( self._index: BootstrapIndexT = np.arange(self._num_items) self._parameters: list[int] = [] - self.pos_data: tuple[AnyArray | pd.Series | pd.DataFrame, ...] = args - self.kw_data: dict[str, AnyArray | pd.Series | pd.DataFrame] = kwargs + self.pos_data: tuple[Union[AnyArray, pd.Series, pd.DataFrame], ...] 
= args + self.kw_data: dict[str, Union[AnyArray, pd.Series, pd.DataFrame]] = kwargs self.data: tuple[ - tuple[AnyArray | pd.Series | pd.DataFrame, ...], - dict[str, AnyArray | pd.Series | pd.DataFrame], + tuple[Union[AnyArray, pd.Series, pd.DataFrame], ...], + dict[str, Union[AnyArray, pd.Series, pd.DataFrame]], ] = (self.pos_data, self.kw_data) - self._base: Float64Array | None = None - self._results: Float64Array | None = None - self._studentized_results: Float64Array | None = None - self._last_func: Callable[..., ArrayLike] | None = None + self._base: Optional[Float64Array] = None + self._results: Optional[Float64Array] = None + self._studentized_results: Optional[Float64Array] = None + self._last_func: Optional[Callable[..., ArrayLike]] = None for key, value in kwargs.items(): attr = getattr(self, key, None) if attr is None: @@ -478,16 +476,10 @@ def _repr_html(self) -> str: return html @property - def generator(self) -> Generator | RandomState: + def generator(self) -> Union[Generator, RandomState]: """ Set or get the instance PRNG - Parameters - ---------- - seed : {Generator, RandomState}, optional - Generator or RandomState used to produce the pseudo-random - values used in the bootstrap - Returns ------- {Generator, RandomState} @@ -497,21 +489,16 @@ def generator(self) -> Generator | RandomState: return self._generator @generator.setter - def generator(self, value: Generator | RandomState) -> None: + def generator(self, value: Union[Generator, RandomState]) -> None: if not isinstance(value, (Generator, RandomState)): raise TypeError("Only a Generator or RandomState can be set") self._generator = value @property - def random_state(self) -> Generator | RandomState: + def random_state(self) -> Union[Generator, RandomState]: """ Set or get the instance random state - Parameters - ---------- - random_state : RandomState - RandomState instance used by bootstrap - Returns ------- RandomState @@ -525,15 +512,15 @@ def random_state(self) -> Generator | RandomState: return self._generator @random_state.setter - def random_state(self, random_state: RandomState) -> None: + def random_state(self, value: RandomState) -> None: warnings.warn( "The random_state property is deprecated and will be removed in a " "future version. 
Use seed instead", FutureWarning, ) - if not isinstance(random_state, (Generator, RandomState)): + if not isinstance(value, (Generator, RandomState)): raise TypeError("Value being set must be a Generator or a RandomState") - self._generator = random_state + self._generator = value @property def index(self) -> BootstrapIndexT: @@ -543,7 +530,7 @@ def index(self) -> BootstrapIndexT: return self._index @property - def state(self) -> RandomStateState | Mapping[str, Any]: + def state(self) -> Union[RandomStateState, Mapping[str, Any]]: """ Set or get the generator's state @@ -561,7 +548,7 @@ def state(self) -> RandomStateState | Mapping[str, Any]: return self._generator.get_state() @state.setter - def state(self, value: RandomStateState | Mapping[str, Any]) -> None: + def state(self, value: Union[RandomStateState, Mapping[str, Any]]) -> None: if isinstance(self._generator, Generator): assert isinstance(value, Mapping) self._generator.bit_generator.state = value @@ -569,7 +556,7 @@ def state(self, value: RandomStateState | Mapping[str, Any]) -> None: assert isinstance(self._generator, RandomState) self._generator.set_state(cast(RandomStateState, value)) - def get_state(self) -> RandomStateState | Mapping[str, Any]: + def get_state(self) -> Union[RandomStateState, Mapping[str, Any]]: """ Gets the state of the bootstrap's random number generator @@ -585,7 +572,7 @@ def get_state(self) -> RandomStateState | Mapping[str, Any]: ) return _get_prng_state(self._generator) - def set_state(self, state: RandomStateState | dict[str, Any]) -> None: + def set_state(self, state: Union[RandomStateState, dict[str, Any]]) -> None: """ Sets the state of the bootstrap's random number generator @@ -606,7 +593,7 @@ def set_state(self, state: RandomStateState | dict[str, Any]) -> None: assert isinstance(self._generator, RandomState) self._generator.set_state(state) - def seed(self, value: int | list[int] | Uint32Array) -> None: + def seed(self, value: Union[int, list[int], Uint32Array]) -> None: """ Reseeds the bootstrap's random number generator @@ -695,12 +682,12 @@ def conf_int( ] = "basic", size: float = 0.95, tail: Literal["two", "upper", "lower"] = "two", - extra_kwargs: dict[str, Any] | None = None, + extra_kwargs: Optional[dict[str, Any]] = None, reuse: bool = False, sampling: Literal[ "nonparametric", "semi-parametric", "semi", "parametric", "semiparametric" ] = "nonparametric", - std_err_func: Callable[..., ArrayLike] | None = None, + std_err_func: Optional[Callable[..., ArrayLike]] = None, studentize_reps: int = 1000, ) -> Float64Array: """ @@ -950,7 +937,7 @@ def _bca_bias(self) -> Float64Array: return b[:, None] def _bca_acceleration( - self, func: Callable[..., Float64Array], extra_kwags: dict[str, Any] | None + self, func: Callable[..., Float64Array], extra_kwags: Optional[dict[str, Any]] ) -> float: nobs = self._num_items jk_params = _loo_jackknife(func, nobs, self._args, self._kwargs, extra_kwags) @@ -959,9 +946,9 @@ def _bca_acceleration( def clone( self, *args: ArrayLike, - seed: None | int | Generator | RandomState = None, + seed: Union[int, Generator, RandomState, None] = None, **kwargs: ArrayLike, - ) -> IIDBootstrap: + ) -> "IIDBootstrap": """ Clones the bootstrap using different data with a fresh prng. 
@@ -976,7 +963,7 @@ def clone( Returns ------- - bs + type[self] Bootstrap instance """ bs = self.__class__(*args, random_state=None, seed=seed, **kwargs) @@ -986,7 +973,7 @@ def apply( self, func: Callable[..., ArrayLike], reps: int = 1000, - extra_kwargs: dict[str, Any] | None = None, + extra_kwargs: Optional[dict[str, Any]] = None, ) -> Float64Array: """ Applies a function to bootstrap replicated data @@ -1047,8 +1034,8 @@ def _construct_bootstrap_estimates( self, func: Callable[..., ArrayLike], reps: int, - extra_kwargs: dict[str, Any] | None = None, - std_err_func: Callable[..., ArrayLike] | None = None, + extra_kwargs: Optional[dict[str, Any]] = None, + std_err_func: Optional[Callable[..., ArrayLike]] = None, studentize_reps: int = 0, sampling: Literal[ "nonparametric", "semi-parametric", "semi", "parametric", "semiparametric" @@ -1110,8 +1097,8 @@ def cov( func: Callable[..., ArrayLike], reps: int = 1000, recenter: bool = True, - extra_kwargs: dict[str, Any] | None = None, - ) -> float | Float64Array: + extra_kwargs: Optional[dict[str, Any]] = None, + ) -> Union[float, Float64Array]: """ Compute parameter covariance using bootstrap @@ -1191,8 +1178,8 @@ def var( func: Callable[..., ArrayLike], reps: int = 1000, recenter: bool = True, - extra_kwargs: dict[str, Any] | None = None, - ) -> float | Float64Array: + extra_kwargs: Optional[dict[str, Any]] = None, + ) -> Union[float, Float64Array]: """ Compute parameter variance using bootstrap @@ -1281,7 +1268,7 @@ def _resample(self) -> tuple[tuple[ArrayLike, ...], dict[str, ArrayLike]]: Resample all data using the values in _index """ indices = cast(Union[Int64Array, tuple[Int64Array, ...]], self._index) - pos_data: list[NDArray | pd.Series | pd.DataFrame] = [] + pos_data: list[Union[NDArray, pd.DataFrame, pd.Series]] = [] for values in self._args: if isinstance(values, (pd.Series, pd.DataFrame)): assert isinstance(indices, NDArray) @@ -1289,7 +1276,7 @@ def _resample(self) -> tuple[tuple[ArrayLike, ...], dict[str, ArrayLike]]: else: assert isinstance(values, np.ndarray) pos_data.append(values[indices]) - named_data: dict[str, NDArray | pd.Series | pd.DataFrame] = {} + named_data: dict[str, Union[NDArray, pd.DataFrame, pd.Series]] = {} for key, values in self._kwargs.items(): if isinstance(values, (pd.Series, pd.DataFrame)): assert isinstance(indices, NDArray) @@ -1381,8 +1368,8 @@ class IndependentSamplesBootstrap(IIDBootstrap): def __init__( self, *args: ArrayLike, - random_state: RandomState | None = None, - seed: None | int | Generator | RandomState = None, + random_state: Optional[RandomState] = None, + seed: Union[int, Generator, RandomState, None] = None, **kwargs: ArrayLike, ) -> None: super().__init__(*args, random_state=random_state, seed=seed, **kwargs) @@ -1450,14 +1437,14 @@ def _resample(self) -> tuple[tuple[ArrayLike, ...], dict[str, ArrayLike]]: pos_indices, kw_indices = cast( tuple[list[Int64Array], dict[str, Int64Array]], self._index ) - pos_data: list[np.ndarray | pd.DataFrame | pd.Series] = [] + pos_data: list[Union[NDArray, pd.DataFrame, pd.Series]] = [] for i, values in enumerate(self._args): if isinstance(values, (pd.Series, pd.DataFrame)): pos_data.append(values.iloc[pos_indices[i]]) else: assert isinstance(values, np.ndarray) pos_data.append(values[pos_indices[i]]) - named_data: dict[str, pd.DataFrame | pd.Series | AnyArray] = {} + named_data: dict[str, Union[AnyArray, pd.Series, pd.DataFrame]] = {} for key, values in self._kwargs.items(): idx = kw_indices[key] if isinstance(values, (pd.Series, pd.DataFrame)): @@ 
-1559,8 +1546,8 @@ def __init__( self, block_size: int, *args: ArrayLike, - random_state: RandomState | None = None, - seed: None | int | Generator | RandomState = None, + random_state: Optional[RandomState] = None, + seed: Union[int, Generator, RandomState, None] = None, **kwargs: ArrayLike, ) -> None: super().__init__(*args, random_state=random_state, seed=seed, **kwargs) @@ -1570,9 +1557,9 @@ def __init__( def clone( self, *args: ArrayLike, - seed: None | int | Generator | RandomState = None, + seed: Union[int, Generator, RandomState, None] = None, **kwargs: ArrayLike, - ) -> CircularBlockBootstrap: + ) -> "CircularBlockBootstrap": """ Clones the bootstrap using different data with a fresh prng. @@ -1711,8 +1698,8 @@ def __init__( self, block_size: int, *args: ArrayLike, - random_state: RandomState | None = None, - seed: None | int | Generator | RandomState = None, + random_state: Optional[RandomState] = None, + seed: Union[int, Generator, RandomState, None] = None, **kwargs: ArrayLike, ) -> None: super().__init__( @@ -1821,8 +1808,8 @@ def __init__( self, block_size: int, *args: ArrayLike, - random_state: RandomState | None = None, - seed: None | int | Generator | RandomState = None, + random_state: Optional[RandomState] = None, + seed: Union[int, Generator, RandomState, None] = None, **kwargs: ArrayLike, ) -> None: super().__init__( @@ -1849,8 +1836,8 @@ def __init__( self, block_size: int, *args: ArrayLike, - random_state: RandomState | None = None, - seed: None | int | Generator | RandomState = None, + random_state: Optional[RandomState] = None, + seed: Union[int, Generator, RandomState, None] = None, **kwargs: ArrayLike, ) -> None: super().__init__(*args, random_state=random_state, seed=seed, **kwargs) @@ -1858,7 +1845,7 @@ def __init__( def update_indices( self, - ) -> ( - Int64Array | tuple[list[Int64Array], dict[str, Int64Array]] - ): # pragma: no cover + ) -> Union[ + Int64Array, tuple[list[Int64Array], dict[str, Int64Array]] + ]: # pragma: no cover raise NotImplementedError diff --git a/arch/bootstrap/multiple_comparison.py b/arch/bootstrap/multiple_comparison.py index 19aafc0358..b777e6f77c 100644 --- a/arch/bootstrap/multiple_comparison.py +++ b/arch/bootstrap/multiple_comparison.py @@ -1,8 +1,6 @@ -from __future__ import annotations - from collections.abc import Hashable, Sequence import copy -from typing import cast +from typing import Optional, Union, cast import warnings import numpy as np @@ -69,7 +67,7 @@ def reset(self) -> None: """ self.bootstrap.reset() - def seed(self, value: int | list[int] | Uint32Array) -> None: + def seed(self, value: Union[int, list[int], Uint32Array]) -> None: """ Seed the bootstrap's random number generator @@ -126,13 +124,13 @@ def __init__( losses: ArrayLike2D, size: float, reps: int = 1000, - block_size: int | None = None, + block_size: Optional[int] = None, method: Literal["R", "max"] = "R", bootstrap: Literal[ "stationary", "sb", "circular", "cbb", "moving block", "mbb" ] = "stationary", *, - seed: None | int | np.random.Generator | np.random.RandomState = None, + seed: Union[int, np.random.Generator, np.random.RandomState, None] = None, ) -> None: super().__init__() self.losses = ensure2d(losses, "losses") @@ -422,7 +420,7 @@ def __init__( benchmark: ArrayLike, models: ArrayLike, size: float = 0.05, - block_size: int | None = None, + block_size: Optional[int] = None, reps: int = 1000, bootstrap: Literal[ "stationary", "sb", "circular", "cbb", "moving block", "mbb" @@ -430,7 +428,7 @@ def __init__( studentize: bool = True, nested: bool 
= False,
         *,
-        seed: None | int | np.random.Generator | np.random.RandomState = None,
+        seed: Union[int, np.random.Generator, np.random.RandomState, None] = None,
     ) -> None:
         super().__init__()
         self.benchmark = ensure2d(benchmark, "benchmark")
@@ -450,7 +448,7 @@ def __init__(
         self.k: int = self.models.shape[1]
         self.reps: int = reps
         self.size: float = size
-        self._superior_models: list[Hashable] | None = None
+        self._superior_models: Optional[list[Hashable]] = None
         self.bootstrap: CircularBlockBootstrap = self.spa.bootstrap
         self._model = "StepM"
@@ -571,7 +569,7 @@ def __init__(
         self,
         benchmark: ArrayLike,
         models: ArrayLike,
-        block_size: int | None = None,
+        block_size: Optional[int] = None,
         reps: int = 1000,
         bootstrap: Literal[
             "stationary", "sb", "circular", "cbb", "moving block", "mbb"
@@ -579,7 +577,7 @@ def __init__(
         studentize: bool = True,
         nested: bool = False,
         *,
-        seed: None | int | np.random.Generator | np.random.RandomState = None,
+        seed: Union[int, np.random.Generator, np.random.RandomState, None] = None,
     ) -> None:
         super().__init__()
         self.benchmark = ensure2d(benchmark, "benchmark")
@@ -613,7 +611,7 @@ def __init__(
         self._seed = seed
         self.bootstrap: CircularBlockBootstrap = bootstrap_inst
         self._pvalues: dict[str, float] = {}
-        self._simulated_vals: Float64Array | None = None
+        self._simulated_vals: Optional[Float64Array] = None
         self._selector = np.ones(self.k, dtype=np.bool_)
         self._model = "SPA"
         if self.studentize:
@@ -783,7 +781,7 @@ def better_models(
         self,
         pvalue: float = 0.05,
         pvalue_type: Literal["lower", "consistent", "upper"] = "consistent",
-    ) -> Int64Array | list[Hashable]:
+    ) -> Union[Int64Array, list[Hashable]]:
         """
         Returns set of models rejected as being equal-or-worse than the
         benchmark
diff --git a/arch/compat/statsmodels.py b/arch/compat/statsmodels.py
index 8f84685658..765e3cc86d 100644
--- a/arch/compat/statsmodels.py
+++ b/arch/compat/statsmodels.py
@@ -1,12 +1,10 @@
-from __future__ import annotations
-
-from typing import Any
+from typing import Any, Union
 
 from numpy import recarray
 from pandas import DataFrame
 
 
-def dataset_loader(dataset: Any) -> recarray | DataFrame:
+def dataset_loader(dataset: Any) -> Union[recarray, DataFrame]:
     """Load a dataset using the new syntax if possible"""
     try:
         return dataset.load(as_pandas=True).data
diff --git a/arch/covariance/kernel.py b/arch/covariance/kernel.py
index c43b5d7041..9239c1f009 100644
--- a/arch/covariance/kernel.py
+++ b/arch/covariance/kernel.py
@@ -1,10 +1,8 @@
-from __future__ import annotations
-
 from arch.compat.numba import jit
 
 from abc import ABC, abstractmethod
 from functools import cached_property
-from typing import SupportsInt, cast
+from typing import Optional, SupportsInt, Union, cast
 
 import numpy as np
 from pandas import DataFrame, Index
@@ -62,7 +60,7 @@ class CovarianceEstimate:
     long_run : ndarray, default None
         The long-run covariance estimate. If not provided, computed from
        short_run and one_sided_strict.
-    one_sided_strict : ndarray, default None
+    one_sided : ndarray, default None
        The one-sided-strict covariance estimate. If not provided, computed
        from short_run and one_sided_strict. 
@@ -86,9 +84,9 @@ def __init__( self, short_run: Float64Array, one_sided_strict: Float64Array, - columns: Index | list[str] | None = None, - long_run: Float64Array | None = None, - one_sided: Float64Array | None = None, + columns: Union[Index, list[str], None] = None, + long_run: Optional[Float64Array] = None, + one_sided: Optional[Float64Array] = None, ) -> None: self._sr = short_run self._oss = one_sided_strict @@ -96,13 +94,13 @@ def __init__( self._long_run = long_run self._one_sided = one_sided - def _wrap(self, value: Float64Array) -> Float64Array | DataFrame: + def _wrap(self, value: Float64Array) -> Union[Float64Array, DataFrame]: if self._columns is not None: return DataFrame(value, columns=self._columns, index=self._columns) return value @cached_property - def long_run(self) -> Float64Array | DataFrame: + def long_run(self) -> Union[Float64Array, DataFrame]: """ The long-run covariance estimate. """ @@ -113,14 +111,14 @@ def long_run(self) -> Float64Array | DataFrame: return self._wrap(long_run) @cached_property - def short_run(self) -> Float64Array | DataFrame: + def short_run(self) -> Union[Float64Array, DataFrame]: """ The short-run covariance estimate. """ return self._wrap(self._sr) @cached_property - def one_sided(self) -> Float64Array | DataFrame: + def one_sided(self) -> Union[Float64Array, DataFrame]: """ The one-sided covariance estimate. """ @@ -131,7 +129,7 @@ def one_sided(self) -> Float64Array | DataFrame: return self._wrap(one_sided) @cached_property - def one_sided_strict(self) -> Float64Array | DataFrame: + def one_sided_strict(self) -> Union[Float64Array, DataFrame]: """ The one-sided strict covariance estimate. """ @@ -139,7 +137,9 @@ def one_sided_strict(self) -> Float64Array | DataFrame: @jit(nopython=True) -def _cov_jit(df: int, k: int, num_weights: int, w: np.ndarray, x: np.ndarray) -> np.ndarray: +def _cov_jit( + df: int, k: int, num_weights: int, w: np.ndarray, x: np.ndarray +) -> np.ndarray: oss = np.zeros((k, k)) for i in range(1, num_weights): oss += w[i] * (x[i:].T @ x[:-i]) / df @@ -185,10 +185,10 @@ class CovarianceEstimator(ABC): def __init__( self, x: ArrayLike, - bandwidth: float | None = None, + bandwidth: Optional[float] = None, df_adjust: int = 0, center: bool = True, - weights: ArrayLike | None = None, + weights: Optional[ArrayLike] = None, force_int: bool = False, ): self._x_orig = ensure2d(x, "x") diff --git a/arch/tests/bootstrap/test_bootstrap.py b/arch/tests/bootstrap/test_bootstrap.py index 836b1bbbf9..b9aadc268f 100644 --- a/arch/tests/bootstrap/test_bootstrap.py +++ b/arch/tests/bootstrap/test_bootstrap.py @@ -1,7 +1,5 @@ -from __future__ import annotations - import copy -from typing import Callable, NamedTuple +from typing import Callable, NamedTuple, Union import warnings import numpy as np @@ -44,7 +42,7 @@ class BSData(NamedTuple): y_series: pd.Series z_df: pd.DataFrame - func: Callable[[np.ndarray, int], float | np.ndarray] + func: Callable[[np.ndarray, int], Union[float, np.ndarray]] @pytest.fixture(scope="function", params=[1234, "gen", "rs"]) diff --git a/arch/tests/unitroot/test_unitroot.py b/arch/tests/unitroot/test_unitroot.py index bd1a2e3f64..f447866730 100644 --- a/arch/tests/unitroot/test_unitroot.py +++ b/arch/tests/unitroot/test_unitroot.py @@ -1,11 +1,9 @@ # TODO: Tests for features that are just called # TODO: Test for trend='ctt' -from __future__ import annotations - from arch.compat.statsmodels import dataset_loader import os -from typing import NamedTuple +from typing import NamedTuple, Optional import warnings 
import numpy as np @@ -526,10 +524,10 @@ def test_kpss_data_dependent_lags(data, trend, lags): class ZATestResult(NamedTuple): stat: float pvalue: float - lags: int | None + lags: Optional[int] trend: str - max_lags: int | None - method: str | None + max_lags: Optional[int] + method: Optional[str] actual_lags: int diff --git a/arch/tests/univariate/test_arch_in_mean.py b/arch/tests/univariate/test_arch_in_mean.py index d160d2bf72..9cc1e793bd 100644 --- a/arch/tests/univariate/test_arch_in_mean.py +++ b/arch/tests/univariate/test_arch_in_mean.py @@ -18,7 +18,7 @@ SP500 = 100 * sp500.load()["Adj Close"].pct_change().dropna() SP500 = SP500.iloc[SP500.shape[0] // 2 :] X = pd.concat([SP500, SP500], axis=1) -X.columns = pd.Index([0, 1]) +X.columns = [0, 1] RANDOMSTATE = np.random.RandomState(12349876) X.loc[:, :] = RANDOMSTATE.standard_normal(X.shape) diff --git a/arch/tests/univariate/test_mean.py b/arch/tests/univariate/test_mean.py index c1478f30e9..9dff8262d0 100644 --- a/arch/tests/univariate/test_mean.py +++ b/arch/tests/univariate/test_mean.py @@ -293,7 +293,7 @@ def test_har(self, constant): direct = pd.DataFrame( index=np.arange(t), columns=["h." + str(i + 1) for i in range(6)], - dtype="float64", + dtype=float, ) params = np.asarray(res.params) @@ -325,7 +325,7 @@ def test_har(self, constant): direct = pd.DataFrame( index=self.y_series.index, columns=["h." + str(i + 1) for i in range(6)], - dtype="float64", + dtype=float, ) forecasts = res.forecast(horizon=6) params = np.asarray(res.params) @@ -415,7 +415,7 @@ def test_ar(self): direct = pd.DataFrame( index=np.arange(y.shape[0]), columns=["h." + str(i + 1) for i in range(5)], - dtype="float64", + dtype=float, ) params = res.params.iloc[:-1] for i in range(2, y.shape[0]): diff --git a/arch/tests/univariate/test_recursions.py b/arch/tests/univariate/test_recursions.py index ca063cc9b3..b5cf74f4a9 100644 --- a/arch/tests/univariate/test_recursions.py +++ b/arch/tests/univariate/test_recursions.py @@ -72,11 +72,7 @@ def display(self): print(self.first_name + ": " + f"{1000 * self.times[0]:0.3f} ms") print(self.second_name + ": " + f"{1000 * self.times[1]:0.3f} ms") if self.ratio < 1: - print( - "{} is {:0.1f}% faster".format( - self.first_name, 100 * (1 / self.ratio - 1) - ) - ) + print(f"{self.first_name} is {100 * (1 / self.ratio - 1):0.1f}% faster") else: print(f"{self.second_name} is {100 * (self.ratio - 1):0.1f}% faster") print(self.first_name + "/" + self.second_name + f" Ratio: {self.ratio:0.3f}\n") diff --git a/arch/tests/utility/test_cov.py b/arch/tests/utility/test_cov.py index 2c79864a34..b68bf4df3b 100644 --- a/arch/tests/utility/test_cov.py +++ b/arch/tests/utility/test_cov.py @@ -1,5 +1,3 @@ -from __future__ import annotations - from arch.compat.statsmodels import dataset_loader from numpy import diff, log diff --git a/arch/typing.py b/arch/typing.py index d76f84ab36..13b67c394b 100644 --- a/arch/typing.py +++ b/arch/typing.py @@ -1,15 +1,10 @@ -from __future__ import annotations - from collections.abc import Hashable import datetime as dt -from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, TypeVar, Union +from typing import Any, Callable, Literal, Optional, TypeVar, Union import numpy as np from pandas import DataFrame, Series, Timestamp -NP_GTE_121 = np.lib.NumpyVersion(np.__version__) >= np.lib.NumpyVersion("1.21.0") - - __all__ = [ "NDArray", "ArrayLike", @@ -36,18 +31,13 @@ ] NDArray = Union[np.ndarray] -if NP_GTE_121 and TYPE_CHECKING: - Float64Array = np.ndarray[Any, np.dtype[np.float64]] # pragma: no 
cover - Int64Array = np.ndarray[Any, np.dtype[np.int64]] # pragma: no cover - Int32Array = np.ndarray[Any, np.dtype[np.int32]] # pragma: no cover - IntArray = np.ndarray[Any, np.dtype[np.int_]] # pragma: no cover - BoolArray = np.ndarray[Any, np.dtype[np.bool_]] # pragma: no cover - AnyArray = np.ndarray[Any, Any] # pragma: no cover - Uint32Array = np.ndarray[Any, np.dtype[np.uint32]] # pragma: no cover -else: - Uint32Array = IntArray = Float64Array = Int64Array = Int32Array = BoolArray = ( - AnyArray - ) = NDArray +Float64Array = np.ndarray[Any, np.dtype[np.double]] # pragma: no cover +Int64Array = np.ndarray[Any, np.dtype[np.longlong]] # pragma: no cover +Int32Array = np.ndarray[Any, np.dtype[np.intc]] # pragma: no cover +IntArray = np.ndarray[Any, np.dtype[np.int_]] # pragma: no cover +BoolArray = np.ndarray[Any, np.dtype[np.bool_]] # pragma: no cover +AnyArray = np.ndarray[Any, Any] # pragma: no cover +Uint32Array = np.ndarray[Any, np.dtype[np.uintc]] # pragma: no cover BootstrapIndexT = Union[ Int64Array, tuple[Int64Array, ...], tuple[list[Int64Array], dict[str, Int64Array]] diff --git a/arch/unitroot/_engle_granger.py b/arch/unitroot/_engle_granger.py index 58efd4660c..cd90698da4 100644 --- a/arch/unitroot/_engle_granger.py +++ b/arch/unitroot/_engle_granger.py @@ -1,4 +1,4 @@ -from __future__ import annotations +from typing import Optional, Union import numpy as np import pandas as pd @@ -29,10 +29,10 @@ def engle_granger( x: ArrayLike2D, trend: UnitRootTrend = "c", *, - lags: int | None = None, - max_lags: int | None = None, + lags: Optional[int] = None, + max_lags: Optional[int] = None, method: Literal["aic", "bic", "t-stat"] = "bic", -) -> EngleGrangerTestResults: +) -> "EngleGrangerTestResults": r""" Test for cointegration within a set of time series. 
@@ -143,8 +143,8 @@ def __init__( alternative: str = "Cointegration", trend: str = "c", order: int = 2, - adf: ADF | None = None, - xsection: RegressionResults | None = None, + adf: Optional[ADF] = None, + xsection: Optional[RegressionResults] = None, ) -> None: super().__init__( stat, pvalue, crit_vals, null, alternative, trend, order, xsection @@ -167,7 +167,7 @@ def lags(self) -> int: return self._adf.lags @property - def max_lags(self) -> int | None: + def max_lags(self) -> Union[int, None]: """The maximum number of lags used in the lag-length selection.""" return self._adf.max_lags diff --git a/arch/unitroot/_phillips_ouliaris.py b/arch/unitroot/_phillips_ouliaris.py index 775ab1a406..886a7569b2 100644 --- a/arch/unitroot/_phillips_ouliaris.py +++ b/arch/unitroot/_phillips_ouliaris.py @@ -1,6 +1,4 @@ -from __future__ import annotations - -from typing import cast +from typing import Optional, cast import numpy as np import pandas as pd @@ -9,7 +7,7 @@ from statsmodels.iolib.table import SimpleTable from statsmodels.regression.linear_model import RegressionResults -import arch.covariance.kernel as lrcov +from arch.covariance.kernel import CovarianceEstimator from arch.typing import ArrayLike1D, ArrayLike2D, Literal, UnitRootTrend from arch.unitroot._shared import ( KERNEL_ERR, @@ -42,9 +40,9 @@ def _po_ptests( test_type: Literal["Pu", "Pz"], trend: UnitRootTrend, kernel: str, - bandwidth: int | None, + bandwidth: Optional[int], force_int: bool, -) -> PhillipsOuliarisTestResults: +) -> "PhillipsOuliarisTestResults": nobs = z.shape[0] z_lead = z.iloc[1:] z_lag = add_trend(z.iloc[:-1], trend=trend) @@ -94,9 +92,9 @@ def _po_ztests( test_type: Literal["Za", "Zt"], trend: UnitRootTrend, kernel: str, - bandwidth: int | None, + bandwidth: Optional[int], force_int: bool, -) -> PhillipsOuliarisTestResults: +) -> "PhillipsOuliarisTestResults": # Za and Zt tests u = np.asarray(xsection.resid)[:, None] nobs = u.shape[0] @@ -139,9 +137,9 @@ def phillips_ouliaris( *, test_type: Literal["Za", "Zt", "Pu", "Pz"] = "Zt", kernel: str = "bartlett", - bandwidth: int | None = None, + bandwidth: Optional[int] = None, force_int: bool = False, -) -> PhillipsOuliarisTestResults: +) -> "PhillipsOuliarisTestResults": r""" Test for cointegration within a set of time series. 
@@ -321,9 +319,9 @@ def __init__( alternative: str = "Cointegration", trend: str = "c", order: int = 2, - xsection: RegressionResults | None = None, + xsection: Optional[RegressionResults] = None, test_type: str = "Za", - kernel_est: lrcov.CovarianceEstimator | None = None, + kernel_est: Optional[CovarianceEstimator] = None, rho: float = 0.0, ) -> None: super().__init__( diff --git a/arch/unitroot/_shared.py b/arch/unitroot/_shared.py index cf3baaff24..3b6baf642a 100644 --- a/arch/unitroot/_shared.py +++ b/arch/unitroot/_shared.py @@ -1,6 +1,4 @@ -from __future__ import annotations - -from typing import Any, NamedTuple, cast +from typing import Any, NamedTuple, Optional, Union, cast import pandas as pd from statsmodels.iolib.summary import Summary @@ -67,7 +65,7 @@ def _check_cointegrating_regression( def _cross_section( - y: ArrayLike1D | ArrayLike2D, x: ArrayLike2D, trend: UnitRootTrend + y: Union[ArrayLike1D, ArrayLike2D], x: ArrayLike2D, trend: UnitRootTrend ) -> RegressionResults: if trend not in ("n", "c", "ct", "ctt"): raise ValueError('trend must be one of "n", "c", "ct" or "ctt"') @@ -184,7 +182,7 @@ def __init__( alternative: str = "Cointegration", trend: str = "c", order: int = 2, - xsection: RegressionResults | None = None, + xsection: Optional[RegressionResults] = None, ) -> None: super().__init__(stat, pvalue, crit_vals, null, alternative) self.name = "NONE" @@ -221,8 +219,8 @@ def resid(self) -> pd.Series: return resid def plot( - self, axes: plt.Axes | None = None, title: str | None = None - ) -> plt.Figure: + self, axes: Optional["plt.Axes"] = None, title: Optional[str] = None + ) -> "plt.Figure": """ Plot the cointegration residuals. diff --git a/arch/unitroot/cointegration.py b/arch/unitroot/cointegration.py index ba4570e012..cd8c39dc18 100644 --- a/arch/unitroot/cointegration.py +++ b/arch/unitroot/cointegration.py @@ -1,17 +1,17 @@ -from __future__ import annotations - from collections.abc import Sequence from functools import cached_property +from typing import Optional import numpy as np import pandas as pd +from pandas import DataFrame, Series from pandas.util._decorators import Appender, Substitution from scipy import stats from statsmodels.iolib.summary import Summary, fmt_2cols, fmt_params from statsmodels.iolib.table import SimpleTable from statsmodels.regression.linear_model import OLS, RegressionResults -import arch.covariance.kernel as lrcov +from arch.covariance.kernel import CovarianceEstimate, CovarianceEstimator from arch.typing import ArrayLike1D, ArrayLike2D, Float64Array, Literal, UnitRootTrend from arch.unitroot._engle_granger import EngleGrangerTestResults, engle_granger from arch.unitroot._phillips_ouliaris import ( @@ -45,10 +45,10 @@ class _CommonCointegrationResults: def __init__( self, - params: pd.Series, - cov: pd.DataFrame, - resid: pd.Series, - kernel_est: lrcov.CovarianceEstimator, + params: Series, + cov: DataFrame, + resid: Series, + kernel_est: CovarianceEstimator, num_x: int, trend: UnitRootTrend, df_adjust: bool, @@ -71,31 +71,31 @@ def __init__( self._estimator_type = estimator_type @property - def params(self) -> pd.Series: + def params(self) -> Series: """The estimated parameters of the cointegrating vector""" return self._params.iloc[: self._ci_size] @cached_property - def std_errors(self) -> pd.Series: + def std_errors(self) -> Series: """ Standard errors of the parameters in the cointegrating vector """ se = np.sqrt(np.diag(self.cov)) - return pd.Series(se, index=self.params.index, name="std_errors") + return Series(se, 
index=self.params.index, name="std_errors") @cached_property - def tvalues(self) -> pd.Series: + def tvalues(self) -> Series: """ T-statistics of the parameters in the cointegrating vector """ - return pd.Series(self.params / self.std_errors, name="tvalues") + return Series(self.params / self.std_errors, name="tvalues") @cached_property - def pvalues(self) -> pd.Series: + def pvalues(self) -> Series: """ P-value of the parameters in the cointegrating vector """ - return pd.Series(2 * (1 - stats.norm.cdf(np.abs(self.tvalues))), name="pvalues") + return Series(2 * (1 - stats.norm.cdf(np.abs(self.tvalues))), name="pvalues") @property def cov(self) -> pd.DataFrame: @@ -103,7 +103,7 @@ def cov(self) -> pd.DataFrame: return self._cov.iloc[: self._ci_size, : self._ci_size] @property - def resid(self) -> pd.Series: + def resid(self) -> Series: """The model residuals""" return self._resid @@ -128,7 +128,7 @@ def rsquared_adj(self) -> float: return self._rsquared_adj @cached_property - def _cov_est(self) -> lrcov.CovarianceEstimate: + def _cov_est(self) -> CovarianceEstimate: r = np.asarray(self._resid) kern_class = self._kernel_est.__class__ bw = self._bandwidth @@ -327,13 +327,13 @@ class DynamicOLSResults(_CommonCointegrationResults): def __init__( self, - params: pd.Series, - cov: pd.DataFrame, - resid: pd.Series, + params: Series, + cov: DataFrame, + resid: Series, lags: int, leads: int, cov_type: str, - kernel_est: lrcov.CovarianceEstimator, + kernel_est: CovarianceEstimator, num_x: int, trend: UnitRootTrend, reg_results: RegressionResults, @@ -357,7 +357,7 @@ def __init__( self._ci_size = params.shape[0] - self._num_x * (leads + lags + 1) @property - def full_params(self) -> pd.Series: + def full_params(self) -> Series: """The complete set of parameters, including leads and lags""" return self._params @@ -529,11 +529,11 @@ def __init__( y: ArrayLike1D, x: ArrayLike2D, trend: UnitRootTrend = "c", - lags: int | None = None, - leads: int | None = None, + lags: Optional[int] = None, + leads: Optional[int] = None, common: bool = False, - max_lag: int | None = None, - max_lead: int | None = None, + max_lag: Optional[int] = None, + max_lead: Optional[int] = None, method: Literal["aic", "bic", "hqic"] = "bic", ) -> None: setup = _check_cointegrating_regression(y, x, trend) @@ -684,7 +684,7 @@ def fit( "unadjusted", "homoskedastic", "robust", "kernel" ] = "unadjusted", kernel: str = "bartlett", - bandwidth: int | None = None, + bandwidth: Optional[int] = None, force_int: bool = False, df_adjust: bool = False, ) -> DynamicOLSResults: @@ -762,7 +762,7 @@ def fit( cov, est = self._cov( cov_type, kernel, bandwidth, force_int, df_adjust, rhs, resid ) - params = pd.Series(np.squeeze(coeffs), index=rhs.columns, name="params") + params = Series(np.squeeze(coeffs), index=rhs.columns, name="params") num_x = self._x.shape[1] return DynamicOLSResults( params, @@ -782,12 +782,12 @@ def fit( def _cov( cov_type: Literal["unadjusted", "homoskedastic", "robust", "kernel"], kernel: str, - bandwidth: int | None, + bandwidth: Optional[int], force_int: bool, df_adjust: bool, rhs: pd.DataFrame, - resids: pd.Series, - ) -> tuple[pd.DataFrame, lrcov.CovarianceEstimator]: + resids: Series, + ) -> tuple[pd.DataFrame, CovarianceEstimator]: """Estimate the covariance""" kernel = kernel.lower().replace("-", "").replace("_", "") if kernel not in KERNEL_ESTIMATORS: @@ -817,11 +817,11 @@ def _cov( class CointegrationAnalysisResults(_CommonCointegrationResults): def __init__( self, - params: pd.Series, - cov: pd.DataFrame, - resid: 
pd.Series, + params: Series, + cov: DataFrame, + resid: Series, omega_112: float, - kernel_est: lrcov.CovarianceEstimator, + kernel_est: CovarianceEstimator, num_x: int, trend: UnitRootTrend, df_adjust: bool, @@ -983,7 +983,7 @@ def __init__( y: ArrayLike1D, x: ArrayLike2D, trend: UnitRootTrend = "c", - x_trend: UnitRootTrend | None = None, + x_trend: Optional[UnitRootTrend] = None, ) -> None: setup = _check_cointegrating_regression(y, x, trend) self._y = setup.y @@ -993,8 +993,8 @@ def __init__( self._y_df = pd.DataFrame(self._y) def _common_fit( - self, kernel: str, bandwidth: float | None, force_int: bool, diff: bool - ) -> tuple[lrcov.CovarianceEstimator, Float64Array, Float64Array]: + self, kernel: str, bandwidth: Optional[float], force_int: bool, diff: bool + ) -> tuple[CovarianceEstimator, Float64Array, Float64Array]: kernel = _check_kernel(kernel) res = _cross_section(self._y, self._x, self._trend) x = np.asarray(self._x) @@ -1023,7 +1023,7 @@ def _common_fit( beta = np.asarray(res.params)[: x.shape[1]] return cov_est, eta, beta - def _final_statistics(self, theta: pd.Series) -> tuple[pd.Series, float, float]: + def _final_statistics(self, theta: Series) -> tuple[Series, float, float]: z = add_trend(self._x, self._trend) nobs, nvar = z.shape resid = self._y - np.asarray(z @ theta) @@ -1043,7 +1043,7 @@ def _final_statistics(self, theta: pd.Series) -> tuple[pd.Series, float, float]: def fit( self, kernel: str = "bartlett", - bandwidth: float | None = None, + bandwidth: Optional[float] = None, force_int: bool = True, diff: bool = False, df_adjust: bool = False, @@ -1112,7 +1112,7 @@ def fit( zpz_inv = np.linalg.inv(zpz) param_cov = omega_112 * zpz_inv cols = z_df.columns - params_s = pd.Series(params.squeeze(), index=cols, name="params") + params_s = Series(params.squeeze(), index=cols, name="params") param_cov = pd.DataFrame(param_cov, columns=cols, index=cols) resid, r2, r2_adj = self._final_statistics(params_s) resid_kern = KERNEL_ESTIMATORS[kernel]( @@ -1141,7 +1141,7 @@ def __init__( y: ArrayLike1D, x: ArrayLike2D, trend: UnitRootTrend = "c", - x_trend: UnitRootTrend | None = None, + x_trend: Optional[UnitRootTrend] = None, ) -> None: super().__init__(y, x, trend, x_trend) @@ -1149,7 +1149,7 @@ def __init__( def fit( self, kernel: str = "bartlett", - bandwidth: float | None = None, + bandwidth: Optional[float] = None, force_int: bool = True, diff: bool = False, df_adjust: bool = False, @@ -1184,7 +1184,7 @@ def fit( with_trend = add_trend(self._x.iloc[:10], self._trend) assert isinstance(with_trend, pd.DataFrame) cols = with_trend.columns - params = pd.Series(params.squeeze(), index=cols, name="params") + params = Series(params.squeeze(), index=cols, name="params") param_cov = pd.DataFrame(param_cov, columns=cols, index=cols) resid, r2, r2_adj = self._final_statistics(params) resid_kern = KERNEL_ESTIMATORS[kernel]( diff --git a/arch/unitroot/critical_values/dfgls.py b/arch/unitroot/critical_values/dfgls.py index b3a218745d..470e2282fe 100644 --- a/arch/unitroot/critical_values/dfgls.py +++ b/arch/unitroot/critical_values/dfgls.py @@ -6,8 +6,6 @@ simulation. See dfgls_critival_values_simulation for implementation. """ -from __future__ import annotations - from numpy import array dfgls_cv_approx = { diff --git a/arch/unitroot/critical_values/dickey_fuller.py b/arch/unitroot/critical_values/dickey_fuller.py index 113f65917f..ef9bc5ee68 100644 --- a/arch/unitroot/critical_values/dickey_fuller.py +++ b/arch/unitroot/critical_values/dickey_fuller.py @@ -7,8 +7,6 @@ simulation. 
""" -from __future__ import annotations - from numpy import array, asarray, inf from arch.typing import Float64Array diff --git a/arch/unitroot/critical_values/engle_granger.py b/arch/unitroot/critical_values/engle_granger.py index c445f53c84..e257eb7864 100644 --- a/arch/unitroot/critical_values/engle_granger.py +++ b/arch/unitroot/critical_values/engle_granger.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import numpy as np eg_num_variables = np.arange(1, 13) diff --git a/arch/unitroot/critical_values/kpss.py b/arch/unitroot/critical_values/kpss.py index e81e3494ad..c3c4626e28 100644 --- a/arch/unitroot/critical_values/kpss.py +++ b/arch/unitroot/critical_values/kpss.py @@ -1,5 +1,3 @@ -from __future__ import annotations - from numpy import asarray kpss_critical_values = {} diff --git a/arch/unitroot/critical_values/phillips_ouliaris.py b/arch/unitroot/critical_values/phillips_ouliaris.py index 7b1cfdee77..3ddda66e3d 100644 --- a/arch/unitroot/critical_values/phillips_ouliaris.py +++ b/arch/unitroot/critical_values/phillips_ouliaris.py @@ -11,8 +11,6 @@ P-type statistics with trend ctt based on 13,250,000 simulations """ -from __future__ import annotations - from math import inf CV_PARAMETERS = { diff --git a/arch/unitroot/critical_values/simulation/adf_simulation.py b/arch/unitroot/critical_values/simulation/adf_simulation.py index 3905e2c8e0..f48dd504b4 100644 --- a/arch/unitroot/critical_values/simulation/adf_simulation.py +++ b/arch/unitroot/critical_values/simulation/adf_simulation.py @@ -1,7 +1,6 @@ -from __future__ import annotations - import os import platform +from typing import Union from numpy import arange, array, cumsum, dot, ones, vstack from numpy.linalg import pinv @@ -67,7 +66,7 @@ def adf_simulation( n: int, trend: UnitRootTrend, b: int, - rng: None | RandomState | Generator = None, + rng: Union[None, RandomState, Generator] = None, ) -> float: """ Simulates the empirical distribution of the ADF z-test statistic diff --git a/arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_joblib.py b/arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_joblib.py index ed5c8df942..579187d47d 100644 --- a/arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_joblib.py +++ b/arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_joblib.py @@ -2,8 +2,6 @@ Simulation of ADF z-test critical values. Closely follows MacKinnon (2010). """ -from __future__ import annotations - import argparse import os import random diff --git a/arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_large_cluster.py b/arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_large_cluster.py index 827aef88d5..f55359162b 100644 --- a/arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_large_cluster.py +++ b/arch/unitroot/critical_values/simulation/adf_z_critical_values_simulation_large_cluster.py @@ -11,8 +11,6 @@ scale well with 128 or more engines. """ -from __future__ import annotations - import datetime import time from typing import cast diff --git a/arch/unitroot/critical_values/simulation/dfgls_critical_values_simulation.py b/arch/unitroot/critical_values/simulation/dfgls_critical_values_simulation.py index 055ee1420f..b294553eed 100644 --- a/arch/unitroot/critical_values/simulation/dfgls_critical_values_simulation.py +++ b/arch/unitroot/critical_values/simulation/dfgls_critical_values_simulation.py @@ -4,10 +4,8 @@ best when joblib is installed. 
""" -from __future__ import annotations - import datetime -from typing import cast +from typing import Optional, cast import numpy as np from numpy.linalg import pinv @@ -49,7 +47,7 @@ def wrapper(n: int, trend: Literal["c", "ct"], b: int, seed: int = 0) -> np.ndar def dfgsl_simulation( - n: int, trend: Literal["c", "ct"], b: int, rng: RandomState | None = None + n: int, trend: Literal["c", "ct"], b: int, rng: Optional[RandomState] = None ) -> float: """ Simulates the empirical distribution of the DFGLS test statistic @@ -138,7 +136,7 @@ def dfgsl_simulation( results = np.zeros((len(percentiles), len(T), EX_NUM)) for i in range(EX_NUM): - print("Experiment Number {} of {} " "(trend {})".format(i + 1, EX_NUM, tr)) + print(f"Experiment Number {i + 1} of {EX_NUM} " "(trend {tr})") now = datetime.datetime.now() parallel, p_func, n_jobs = parallel_func( wrapper, n_jobs=NUM_JOBS, verbose=2 diff --git a/arch/unitroot/critical_values/simulation/phillips-ouliaris-simulation-process.py b/arch/unitroot/critical_values/simulation/phillips-ouliaris-simulation-process.py index 4f7a9532dc..c7bfad4a0c 100644 --- a/arch/unitroot/critical_values/simulation/phillips-ouliaris-simulation-process.py +++ b/arch/unitroot/critical_values/simulation/phillips-ouliaris-simulation-process.py @@ -1,10 +1,8 @@ -from __future__ import annotations - from collections import defaultdict import glob from itertools import product import os -from typing import NamedTuple, cast +from typing import NamedTuple, Union, cast from black import FileMode, TargetVersion, format_file_contents import matplotlib.backends.backend_pdf @@ -96,7 +94,7 @@ def estimate_cv_regression( return out, float(tau.min()) -def fit_pval_model(quantiles: pd.DataFrame | pd.Series) -> PvalueResult: +def fit_pval_model(quantiles: Union[pd.DataFrame, pd.Series]) -> PvalueResult: percentiles = quantiles.index.to_numpy() lhs = stats.norm.ppf(percentiles) data = np.asarray(quantiles) diff --git a/arch/unitroot/critical_values/simulation/shared.py b/arch/unitroot/critical_values/simulation/shared.py index 19f30da686..077e3bc01f 100644 --- a/arch/unitroot/critical_values/simulation/shared.py +++ b/arch/unitroot/critical_values/simulation/shared.py @@ -1,7 +1,5 @@ -from __future__ import annotations - from collections.abc import Sequence -from typing import Any, NamedTuple +from typing import Any, NamedTuple, Union import numpy as np import pandas as pd @@ -55,7 +53,7 @@ def estimate_cv_regression( def fit_pval_model( - quantiles: pd.Series | pd.DataFrame, + quantiles: Union[pd.Series, pd.DataFrame], small_order: int = 3, use_log: bool = False, drop_insignif: bool = True, diff --git a/arch/unitroot/critical_values/zivot_andrews.py b/arch/unitroot/critical_values/zivot_andrews.py index 2db8d267d6..eeaaeefc01 100644 --- a/arch/unitroot/critical_values/zivot_andrews.py +++ b/arch/unitroot/critical_values/zivot_andrews.py @@ -8,8 +8,6 @@ replications and 2000 data points. 
""" -from __future__ import annotations - from numpy import array # constant-only model diff --git a/arch/unitroot/unitroot.py b/arch/unitroot/unitroot.py index 80c21851f4..c00c0dcca7 100644 --- a/arch/unitroot/unitroot.py +++ b/arch/unitroot/unitroot.py @@ -1,8 +1,6 @@ -from __future__ import annotations - from abc import ABCMeta, abstractmethod from collections.abc import Sequence -from typing import cast +from typing import Optional, Union, cast import warnings from numpy import ( @@ -19,7 +17,6 @@ diag, diff, empty, - float64, full, hstack, inf, @@ -121,7 +118,9 @@ } -def _is_reduced_rank(x: Float64Array | DataFrame) -> tuple[bool, int | None]: +def _is_reduced_rank( + x: Union[Float64Array, DataFrame] +) -> tuple[bool, Union[int, None]]: """ Check if a matrix has reduced rank preferring quick checks """ @@ -236,11 +235,11 @@ def _autolag_ols_low_memory( trendx.append(empty((nobs, 0))) else: if "tt" in trend: - tt = arange(1, nobs + 1, dtype=float64)[:, None] ** 2 + tt = arange(1, nobs + 1, dtype=float)[:, None] ** 2 tt *= sqrt(5) / float(nobs) ** (5 / 2) trendx.append(tt) if "t" in trend: - t = arange(1, nobs + 1, dtype=float64)[:, None] + t = arange(1, nobs + 1, dtype=float)[:, None] t *= sqrt(3) / float(nobs) ** (3 / 2) trendx.append(t) if trend.startswith("c"): @@ -360,7 +359,7 @@ def _autolag_ols( def _df_select_lags( y: Float64Array, trend: Literal["n", "c", "ct", "ctt"], - max_lags: int | None, + max_lags: Optional[int], method: Literal["aic", "bic", "t-stat"], low_memory: bool = False, ) -> tuple[float, int]: @@ -481,9 +480,9 @@ class UnitRootTest(metaclass=ABCMeta): def __init__( self, y: ArrayLike, - lags: int | None, - trend: UnitRootTrend | Literal["t"], - valid_trends: list[str] | tuple[str, ...], + lags: Optional[int], + trend: Union[UnitRootTrend, Literal["t"]], + valid_trends: Union[list[str], tuple[str, ...]], ) -> None: self._y = ensure1d(y, "y", series=False) self._delta_y = diff(y) @@ -495,9 +494,9 @@ def __init__( if trend not in self.valid_trends: raise ValueError("trend not understood") self._trend = trend - self._stat: float | None = None + self._stat: Optional[float] = None self._critical_values: dict[str, float] = {} - self._pvalue: float | None = None + self._pvalue: Optional[float] = None self._null_hypothesis = "The process contains a unit root." self._alternative_hypothesis = "The process is weakly stationary." self._test_name = "" @@ -696,7 +695,9 @@ class ADF(UnitRootTest, metaclass=AbstractDocStringInheritor): If the p-value is close to significant, then the critical values should be used to judge whether to reject the null. - The autolag option and maxlag for it are described in Greene. + The autolag option and maxlag for it are described in Greene [1]_. + See Hamilton [2]_ for more on ADF tests. Critical value simulation based + on MacKinnon [3]_ abd [4]_. Examples -------- @@ -706,31 +707,31 @@ class ADF(UnitRootTest, metaclass=AbstractDocStringInheritor): >>> data = sm.datasets.macrodata.load().data >>> inflation = np.diff(np.log(data["cpi"])) >>> adf = ADF(inflation) - >>> print("{0:0.4f}".format(adf.stat)) + >>> print(f"{adf.stat:0.4f}") -3.0931 - >>> print("{0:0.4f}".format(adf.pvalue)) + >>> print(f"{adf.pvalue:0.4f}") 0.0271 >>> adf.lags 2 >>> adf.trend="ct" - >>> print("{0:0.4f}".format(adf.stat)) + >>> print(f"{adf.stat:0.4f}") -3.2111 - >>> print("{0:0.4f}".format(adf.pvalue)) + >>> print(f"{adf.pvalue:0.4f}") 0.0822 References ---------- - .. [*] Greene, W. H. 2011. Econometric Analysis. Prentice Hall: Upper + .. [1] Greene, W. H. 2011. 
Econometric Analysis. Prentice Hall: Upper
        Saddle River, New Jersey.
 
-    .. [*] Hamilton, J. D. 1994. Time Series Analysis. Princeton: Princeton
+    .. [2] Hamilton, J. D. 1994. Time Series Analysis. Princeton: Princeton
        University Press.
 
-    .. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution
+    .. [3] MacKinnon, J.G. 1994. "Approximate asymptotic distribution
        functions for unit-root and cointegration tests. `Journal of
        Business and Economic Statistics` 12, 167-76.
 
-    .. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
+    .. [4] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
        Queen's University, Dept of Economics, Working Papers. Available at
        https://ideas.repec.org/p/qed/wpaper/1227.html
     """
 
     def __init__(
         self,
         y: ArrayLike,
-        lags: int | None = None,
+        lags: Optional[int] = None,
         trend: UnitRootTrend = "c",
-        max_lags: int | None = None,
+        max_lags: Optional[int] = None,
         method: Literal["aic", "bic", "t-stat"] = "aic",
-        low_memory: bool | None = None,
+        low_memory: Optional[bool] = None,
     ) -> None:
         valid_trends = ("n", "c", "ct", "ctt")
         super().__init__(y, lags, trend, valid_trends)
@@ -807,7 +808,7 @@ def regression(self) -> RegressionResults:
         return self._regression
 
     @property
-    def max_lags(self) -> int | None:
+    def max_lags(self) -> Union[int, None]:
         """Sets or gets the maximum lags used when automatically selecting lag
         length"""
         return self._max_lags
 
 
 class DFGLS(UnitRootTest, metaclass=AbstractDocStringInheritor):
     """
-    Elliott, Rothenberg and Stock's ([1]_) GLS detrended Dickey-Fuller
+    Elliott, Rothenberg and Stock's ([ers]_) GLS detrended Dickey-Fuller
 
     Parameters
     ----------
@@ -834,7 +835,7 @@
     max_lags : int, optional
         The maximum number of lags to use when selecting lag length. When using
         automatic lag length selection, the lag is selected using OLS
-        detrending rather than GLS detrending ([2]_).
+        detrending rather than GLS detrending ([pq]_).
     method : {"AIC", "BIC", "t-stat"}, optional
         The method to use when selecting the lag length
 
@@ -853,7 +854,7 @@
     is used before a trend-less ADF regression is run.
 
     Critical values and p-values when trend is "c" are identical to
-    the ADF. When trend is set to "ct", they are from ...
+    the ADF. When trend is set to "ct", they are from novel simulations.
 
     Examples
     --------
     >>> from arch.unitroot import DFGLS
     >>> import numpy as np
     >>> import statsmodels.api as sm
     >>> data = sm.datasets.macrodata.load().data
     >>> inflation = np.diff(np.log(data["cpi"]))
     >>> dfgls = DFGLS(inflation)
-    >>> print("{0:0.4f}".format(dfgls.stat))
+    >>> print(f"{dfgls.stat:0.4f}")
     -2.7611
-    >>> print("{0:0.4f}".format(dfgls.pvalue))
+    >>> print(f"{dfgls.pvalue:0.4f}")
     0.0059
     >>> dfgls.lags
     2
-    >>> dfgls.trend = "ct"
-    >>> print("{0:0.4f}".format(dfgls.stat))
+    >>> dfgls = DFGLS(inflation, trend="ct")
+    >>> print(f"{dfgls.stat:0.4f}")
     -2.9036
-    >>> print("{0:0.4f}".format(dfgls.pvalue))
+    >>> print(f"{dfgls.pvalue:0.4f}")
     0.0447
 
     References
     ----------
-    .. [1] Elliott, G. R., T. J. Rothenberg, and J. H. Stock. 1996. Efficient
+    .. [ers] Elliott, G. R., T. J. Rothenberg, and J. H. Stock. 1996. Efficient
        tests for an autoregressive unit root. Econometrica 64: 813-836
 
-    .. [2] Perron, P., & Qu, Z. (2007). A simple modification to improve the
+    .. 
[pq] Perron, P., & Qu, Z. (2007). A simple modification to improve the finite sample properties of Ng and Perron's unit root tests. Economics letters, 94(1), 12-19. """ @@ -887,11 +888,11 @@ class DFGLS(UnitRootTest, metaclass=AbstractDocStringInheritor): def __init__( self, y: ArrayLike, - lags: int | None = None, + lags: Optional[int] = None, trend: Literal["c", "ct"] = "c", - max_lags: int | None = None, + max_lags: Optional[int] = None, method: Literal["aic", "bic", "t-stat"] = "aic", - low_memory: bool | None = None, + low_memory: Optional[bool] = None, ) -> None: valid_trends = ("c", "ct") super().__init__(y, lags, trend, valid_trends) @@ -979,7 +980,7 @@ def regression(self) -> RegressionResults: return self._regression @property - def max_lags(self) -> int | None: + def max_lags(self) -> Union[int, None]: """Sets or gets the maximum lags used when automatically selecting lag length""" return self._max_lags @@ -1021,8 +1022,12 @@ class PhillipsPerron(UnitRootTest, metaclass=AbstractDocStringInheritor): correlation in the regression errors is accounted for using a long-run variance estimator (currently Newey-West). - The p-values are obtained through regression surface approximation from - MacKinnon (1994) using the updated 2010 tables. + See Philips and Perron for details [3]_. See Hamilton [1]_ for more on + PP tests. Newey and West contains information about long-run variance + estimation [2]_. The p-values are obtained through regression surface + approximation using the mathodology of MacKinnon [4]_ and [5]_, only + using many more simulations. + If the p-value is close to significant, then the critical values should be used to judge whether to reject the null. @@ -1034,40 +1039,40 @@ class PhillipsPerron(UnitRootTest, metaclass=AbstractDocStringInheritor): >>> data = sm.datasets.macrodata.load().data >>> inflation = np.diff(np.log(data["cpi"])) >>> pp = PhillipsPerron(inflation) - >>> print("{0:0.4f}".format(pp.stat)) + >>> print(f"{pp.stat:0.4f}") -8.1356 - >>> print("{0:0.4f}".format(pp.pvalue)) + >>> print(f"{pp.pvalue:0.4f}") 0.0000 >>> pp.lags 15 >>> pp.trend = "ct" - >>> print("{0:0.4f}".format(pp.stat)) + >>> print(f"{pp.stat:0.4f}") -8.2022 - >>> print("{0:0.4f}".format(pp.pvalue)) + >>> print(f"{pp.pvalue:0.4f}") 0.0000 >>> pp.test_type = "rho" - >>> print("{0:0.4f}".format(pp.stat)) + >>> print(f"{pp.stat:0.4f}") -120.3271 - >>> print("{0:0.4f}".format(pp.pvalue)) + >>> print(f"{pp.pvalue:0.4f}") 0.0000 References ---------- - .. [*] Hamilton, J. D. 1994. Time Series Analysis. Princeton: Princeton + .. [1] Hamilton, J. D. 1994. Time Series Analysis. Princeton: Princeton University Press. - .. [*] Newey, W. K., and K. D. West. 1987. "A simple, positive + .. [2] Newey, W. K., and K. D. West. 1987. "A simple, positive semidefinite, heteroskedasticity and autocorrelation consistent covariance matrix". Econometrica 55, 703-708. - .. [*] Phillips, P. C. B., and P. Perron. 1988. "Testing for a unit root in + .. [3] Phillips, P. C. B., and P. Perron. 1988. "Testing for a unit root in time series regression". Biometrika 75, 335-346. - .. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution + .. [4] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for unit-root and cointegration bootstrap". Journal of Business and Economic Statistics. 12, 167-76. - .. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." + .. [5] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's University, Dept of Economics, Working Papers. 
Available at https://ideas.repec.org/p/qed/wpaper/1227.html """ @@ -1075,7 +1080,7 @@ class PhillipsPerron(UnitRootTest, metaclass=AbstractDocStringInheritor): def __init__( self, y: ArrayLike, - lags: int | None = None, + lags: Optional[int] = None, trend: Literal["n", "c", "ct"] = "c", test_type: Literal["tau", "rho"] = "tau", ) -> None: @@ -1213,7 +1218,9 @@ class KPSS(UnitRootTest, metaclass=AbstractDocStringInheritor): The p-values and critical values were computed using an extensive simulation based on 100,000,000 replications using series with 2,000 - observations. + observations. See [3]_ for the initial description of the KPSS test. + Further details are available in [2]_ and [5]_. Details about the long-run + covariance estimation can be found in [1]_ and [4]_. Examples -------- @@ -1223,38 +1230,38 @@ class KPSS(UnitRootTest, metaclass=AbstractDocStringInheritor): >>> data = sm.datasets.macrodata.load().data >>> inflation = np.diff(np.log(data["cpi"])) >>> kpss = KPSS(inflation) - >>> print("{0:0.4f}".format(kpss.stat)) + >>> print(f"{kpss.stat:0.4f}") 0.2870 - >>> print("{0:0.4f}".format(kpss.pvalue)) + >>> print(f"{kpss.pvalue:0.4f}") 0.1473 >>> kpss.trend = "ct" - >>> print("{0:0.4f}".format(kpss.stat)) + >>> print(f"{kpss.stat:0.4f}") 0.2075 - >>> print("{0:0.4f}".format(kpss.pvalue)) + >>> print(f"{kpss.pvalue:0.4f}") 0.0128 References ---------- - .. [*] Andrews, D.W.K. (1991). "Heteroskedasticity and autocorrelation + .. [1] Andrews, D.W.K. (1991). "Heteroskedasticity and autocorrelation consistent covariance matrix estimation". Econometrica, 59: 817-858. - .. [*] Hobijn, B., Frances, B.H., & Ooms, M. (2004). Generalizations + .. [2] Hobijn, B., Frances, B.H., & Ooms, M. (2004). Generalizations of the KPSS-test for stationarity. Statistica Neerlandica, 52: 483-502. - .. [*] Kwiatkowski, D.; Phillips, P. C. B.; Schmidt, P.; Shin, Y. (1992). + .. [3] Kwiatkowski, D.; Phillips, P. C. B.; Schmidt, P.; Shin, Y. (1992). "Testing the null hypothesis of stationarity against the alternative of a unit root". Journal of Econometrics 54 (1-3), 159-178 - .. [*] Newey, W.K., & West, K.D. (1994). "Automatic lag selection in + .. [4] Newey, W.K., & West, K.D. (1994). "Automatic lag selection in covariance matrix estimation". Review of Economic Studies, 61: 631-653. - .. [*] Schwert, G. W. (1989). "Tests for unit roots: A Monte Carlo + .. [5] Schwert, G. W. (1989). "Tests for unit roots: A Monte Carlo investigation". Journal of Business and Economic Statistics, 7 (2): 147-159. """ def __init__( - self, y: ArrayLike, lags: int | None = None, trend: Literal["c", "ct"] = "c" + self, y: ArrayLike, lags: Optional[int] = None, trend: Literal["c", "ct"] = "c" ) -> None: valid_trends = ("c", "ct") if lags is None: @@ -1271,7 +1278,7 @@ def __init__( self._test_name = "KPSS Stationarity Test" self._null_hypothesis = "The process is weakly stationary." self._alternative_hypothesis = "The process contains a unit root." - self._resids: ArrayLike1D | None = None + self._resids: Union[ArrayLike1D, None] = None def _check_specification(self) -> None: trend_order = len(self._trend) @@ -1400,17 +1407,20 @@ class ZivotAndrews(UnitRootTest, metaclass=AbstractDocStringInheritor): No attempt has been made to characterize the size/power trade-off. + Based on the description in Zivot and Andrews [3]_. See [2]_ for + a general discussion of unit root tests. Code tested against Baum [1]_. + References ---------- - .. [*] Baum, C.F. (2004). ZANDREWS: Stata module to calculate Zivot-Andrews + .. [1] Baum, C.F. 
(2004). ZANDREWS: Stata module to calculate Zivot-Andrews
       unit root test in presence of structural break. Statistical
       Software Components S437301, Boston College Department of Economics,
       revised 2015.

-    .. [*] Schwert, G.W. (1989). Tests for unit roots: A Monte Carlo
+    .. [2] Schwert, G.W. (1989). Tests for unit roots: A Monte Carlo
       investigation. Journal of Business & Economic Statistics, 7:
       147-159.

-    .. [*] Zivot, E., and Andrews, D.W.K. (1992). Further evidence on the great
+    .. [3] Zivot, E., and Andrews, D.W.K. (1992). Further evidence on the great
       crash, the oil-price shock, and the unit-root hypothesis. Journal of
       Business & Economic Statistics, 10: 251-270.
     """
@@ -1418,10 +1428,10 @@ class ZivotAndrews(UnitRootTest, metaclass=AbstractDocStringInheritor):
     def __init__(
         self,
         y: ArrayLike,
-        lags: int | None = None,
+        lags: Optional[int] = None,
         trend: Literal["c", "ct", "t"] = "c",
         trim: float = 0.15,
-        max_lags: int | None = None,
+        max_lags: Optional[int] = None,
         method: Literal["aic", "bic", "t-stat"] = "aic",
     ) -> None:
         super().__init__(y, lags, trend, ("c", "t", "ct"))
@@ -1594,6 +1604,7 @@ class VarianceRatio(UnitRootTest, metaclass=AbstractDocStringInheritor):
     The null hypothesis of a VR is that the process is a random walk, possibly
     plus drift. Rejection of the null with a positive test statistic indicates
     the presence of positive serial correlation in the time series.
+    See [1]_ for details about variance ratio testing.

     Examples
     --------
@@ -1602,12 +1613,12 @@ class VarianceRatio(UnitRootTest, metaclass=AbstractDocStringInheritor):
     >>> import numpy as np
     >>> import pandas_datareader as pdr
     >>> data = pdr.get_data_fred("DJIA", start="2010-1-1", end="2020-12-31")
     >>> data = np.log(data.resample("M").last())  # End of month
     >>> vr = VarianceRatio(data, lags=12)
-    >>> print("{0:0.4f}".format(vr.pvalue))
+    >>> print(f"{vr.pvalue:0.4f}")
     0.1370

     References
     ----------
-    .. [*] Campbell, John Y., Lo, Andrew W. and MacKinlay, A. Craig. (1997) The
+    .. [1] Campbell, John Y., Lo, Andrew W. and MacKinlay, A. Craig. (1997) The
       Econometrics of Financial Markets. Princeton, NJ: Princeton
       University Press. 
""" @@ -1631,8 +1642,8 @@ def __init__( self._robust = robust self._debiased = debiased self._overlap = overlap - self._vr: float | None = None - self._stat_variance: float | None = None + self._vr: Optional[float] = None + self._stat_variance: Optional[float] = None quantiles = array([0.01, 0.05, 0.1, 0.9, 0.95, 0.99]) for q, cv in zip(quantiles, norm.ppf(quantiles)): self._critical_values[str(int(100 * q)) + "%"] = cv @@ -1935,7 +1946,7 @@ def kpss_crit( def auto_bandwidth( - y: Sequence[float | int] | ArrayLike1D, + y: Union[Sequence[Union[float, int]], ArrayLike1D], kernel: Literal[ "ba", "bartlett", "nw", "pa", "parzen", "gallant", "qs", "andrews" ] = "ba", diff --git a/arch/univariate/__init__.py b/arch/univariate/__init__.py index 446d0b389e..b4bb335b34 100644 --- a/arch/univariate/__init__.py +++ b/arch/univariate/__init__.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import types from arch.univariate import recursions_python diff --git a/arch/univariate/base.py b/arch/univariate/base.py index 609345bd0b..12108b246b 100644 --- a/arch/univariate/base.py +++ b/arch/univariate/base.py @@ -2,14 +2,12 @@ Core classes for ARCH models """ -from __future__ import annotations - from abc import ABCMeta, abstractmethod from collections.abc import Sequence from copy import deepcopy import datetime as dt from functools import cached_property -from typing import Any, Callable, cast +from typing import Any, Callable, Optional, Union, cast import warnings import numpy as np @@ -181,11 +179,11 @@ class ARCHModel(metaclass=ABCMeta): def __init__( self, - y: ArrayLike | None = None, - volatility: VolatilityProcess | None = None, - distribution: Distribution | None = None, - hold_back: int | None = None, - rescale: bool | None = None, + y: Optional[ArrayLike] = None, + volatility: Optional[VolatilityProcess] = None, + distribution: Optional[Distribution] = None, + hold_back: Optional[int] = None, + rescale: Optional[bool] = None, ) -> None: self._name = "ARCHModel" self._is_pandas = isinstance(y, (pd.DataFrame, pd.Series)) @@ -203,14 +201,14 @@ def __init__( self._fit_indices: list[int] = [0, int(self._y.shape[0])] self._fit_y = self._y - self.hold_back: int | None = hold_back + self.hold_back: Optional[int] = hold_back self._hold_back = 0 if hold_back is None else hold_back - self.rescale: bool | None = rescale + self.rescale: Optional[bool] = rescale self.scale: float = 1.0 - self._backcast: None | float | Float64Array = None - self._var_bounds: Float64Array | None = None + self._backcast: Union[float, Float64Array, None] = None + self._var_bounds: Optional[Float64Array] = None if isinstance(volatility, VolatilityProcess): self._volatility = volatility @@ -261,7 +259,7 @@ def bounds(self) -> list[tuple[float, float]]: return [(-np.inf, np.inf)] * num_params @property - def y(self) -> ArrayLike | None: + def y(self) -> Optional[ArrayLike]: """Returns the dependent variable""" return self._y_original @@ -315,14 +313,14 @@ def _check_scale(self, resids: Float64Array) -> None: self.scale = rescale @abstractmethod - def _scale_changed(self): + def _scale_changed(self) -> None: """ Called when the scale has changed. This allows the model to update any values that are affected by the scale changes, e.g., any logged values. """ - def _r2(self, params: ArrayLike1D) -> None | float: + def _r2(self, params: ArrayLike1D) -> Optional[float]: """ Computes the model r-square. Optional to over-ride. Must match signature. 
@@ -338,7 +336,7 @@ def _fit_no_arch_normal_errors_params(self) -> Float64Array: @abstractmethod def _fit_no_arch_normal_errors( self, cov_type: Literal["robust", "classic"] = "robust" - ) -> ARCHModelResult: + ) -> "ARCHModelResult": """ Must be overridden with closed form estimator """ @@ -357,8 +355,8 @@ def _static_gaussian_loglikelihood(resids: Float64Array) -> float: def _fit_parameterless_model( self, cov_type: Literal["robust", "classic"], - backcast: float | Float64Array, - ) -> ARCHModelResult: + backcast: Union[float, Float64Array], + ) -> "ARCHModelResult": """ When models have no parameters, fill return values @@ -383,7 +381,7 @@ def _fit_parameterless_model( vol = cast(Float64Array, np.sqrt(vol)) # Reshape resids vol - vol_final = np.empty_like(self._y, dtype=np.float64) + vol_final = np.empty_like(self._y, dtype=np.double) vol_final.fill(np.nan) vol_final[first_obs:last_obs] = vol @@ -414,10 +412,10 @@ def _loglikelihood( self, parameters: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, individual: bool = False, - ) -> float | Float64Array: + ) -> Union[float, Float64Array]: """ Computes the log-likelihood using the entire model @@ -464,7 +462,8 @@ def _all_parameter_names(self) -> list[str]: return names def _parse_parameters( - self, x: ArrayLike1D | Sequence[float] + self, + x: Union[ArrayLike1D, Sequence[float]], ) -> tuple[Float64Array, Float64Array, Float64Array]: """Return the parameters of each model in a tuple""" x = np.asarray(x, dtype=float) @@ -473,10 +472,10 @@ def _parse_parameters( def fix( self, - params: Sequence[float] | ArrayLike1D, - first_obs: int | DateLike | None = None, - last_obs: int | DateLike | None = None, - ) -> ARCHModelFixedResult: + params: Union[ArrayLike1D, Sequence[float]], + first_obs: Union[int, DateLike, None] = None, + last_obs: Union[int, DateLike, None] = None, + ) -> "ARCHModelFixedResult": """ Allows an ARCHModelFixedResult to be constructed from fixed parameters. 
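
[Editor's note] A minimal usage sketch of ``fix`` as annotated above; the data and parameter values are purely illustrative, with parameters ordered [mu, omega, alpha, beta] for a constant-mean GARCH(1,1) model:

import numpy as np
from arch.univariate import ConstantMean, GARCH

y = np.random.default_rng(0).standard_normal(1000)
mod = ConstantMean(y, volatility=GARCH(1, 0, 1))
# Evaluate the model at fixed parameters instead of estimating them
res = mod.fix([0.0, 0.1, 0.1, 0.8])
print(res.loglikelihood)

``fix`` returns an ARCHModelFixedResult, so diagnostics are available without running the optimizer.
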
@@ -524,10 +523,10 @@ def fix( names = self._all_parameter_names() # Reshape resids and vol first_obs, last_obs = self._fit_indices - resids_final = np.empty_like(self._y, dtype=np.float64) + resids_final = np.empty_like(self._y, dtype=np.double) resids_final.fill(np.nan) resids_final[first_obs:last_obs] = resids - vol_final = np.empty_like(self._y, dtype=np.float64) + vol_final = np.empty_like(self._y, dtype=np.double) vol_final.fill(np.nan) vol_final[first_obs:last_obs] = vol @@ -545,7 +544,9 @@ def fix( @abstractmethod def _adjust_sample( - self, first_obs: int | DateLike | None, last_obs: int | DateLike | None + self, + first_obs: Union[int, DateLike, None], + last_obs: Union[int, DateLike, None], ) -> None: """ Performs sample adjustment for estimation @@ -565,16 +566,16 @@ def _adjust_sample( def fit( self, update_freq: int = 1, - disp: bool | Literal["off", "final"] = "final", - starting_values: ArrayLike1D | None = None, + disp: Union[bool, Literal["off"], Literal["final"]] = "final", + starting_values: Optional[ArrayLike1D] = None, cov_type: Literal["robust", "classic"] = "robust", show_warning: bool = True, - first_obs: int | DateLike | None = None, - last_obs: int | DateLike | None = None, - tol: float | None = None, - options: dict[str, Any] | None = None, - backcast: None | float | Float64Array = None, - ) -> ARCHModelResult: + first_obs: Union[int, DateLike, None] = None, + last_obs: Union[int, DateLike, None] = None, + tol: Optional[float] = None, + options: Optional[dict[str, Any]] = None, + backcast: Union[float, Float64Array, None] = None, + ) -> "ARCHModelResult": r""" Estimate model parameters @@ -787,10 +788,10 @@ def fit( names = self._all_parameter_names() # Reshape resids and vol first_obs, last_obs = self._fit_indices - resids_final = np.empty_like(self._y, dtype=np.float64) + resids_final = np.empty_like(self._y, dtype=np.double) resids_final.fill(np.nan) resids_final[first_obs:last_obs] = resids - vol_final = np.empty_like(self._y, dtype=np.float64) + vol_final = np.empty_like(self._y, dtype=np.double) vol_final.fill(np.nan) vol_final[first_obs:last_obs] = vol @@ -846,12 +847,12 @@ def num_params(self) -> int: @abstractmethod def simulate( self, - params: ArrayLike1D | Sequence[float], + params: Union[ArrayLike1D, Sequence[float]], nobs: int, burn: int = 500, - initial_value: float | None = None, - x: ArrayLike | None = None, - initial_value_vol: float | None = None, + initial_value: Optional[float] = None, + x: Optional[ArrayLike] = None, + initial_value_vol: Optional[float] = None, ) -> pd.DataFrame: pass @@ -859,8 +860,8 @@ def simulate( def resids( self, params: Float64Array, - y: ArrayLike1D | None = None, - regressors: ArrayLike2D | None = None, + y: Optional[ArrayLike1D] = None, + regressors: Optional[ArrayLike2D] = None, ) -> ArrayLike1D: """ Compute model residuals @@ -883,7 +884,7 @@ def resids( def compute_param_cov( self, params: Float64Array, - backcast: None | float | Float64Array = None, + backcast: Union[float, Float64Array, None] = None, robust: bool = True, ) -> Float64Array: """ @@ -934,16 +935,16 @@ def forecast( self, params: ArrayLike1D, horizon: int = 1, - start: int | DateLike | None = None, + start: Union[int, DateLike, None] = None, align: Literal["origin", "target"] = "origin", method: ForecastingMethod = "analytic", simulations: int = 1000, - rng: Callable[[int | tuple[int, ...]], Float64Array] | None = None, - random_state: np.random.RandomState | None = None, + rng: Optional[Callable[[Union[int, tuple[int, ...]]], Float64Array]] = 
None, + random_state: Optional[np.random.RandomState] = None, *, reindex: bool = False, - x: None | dict[Label, ArrayLike] | ArrayLike = None, - ) -> ARCHModelForecast: + x: Union[dict[Label, ArrayLike], ArrayLike, None] = None, + ) -> "ARCHModelForecast": """ Construct forecasts from estimated model @@ -1260,7 +1261,7 @@ def params(self) -> pd.Series: return pd.Series(self._params, index=self._names, name="params") @cached_property - def conditional_volatility(self) -> pd.Series | Float64Array: + def conditional_volatility(self) -> Union[Float64Array, pd.Series]: """ Estimated conditional volatility @@ -1285,7 +1286,7 @@ def nobs(self) -> int: return self._nobs @cached_property - def resid(self) -> Float64Array | pd.Series: + def resid(self) -> Union[Float64Array, pd.Series]: """ Model residuals """ @@ -1295,7 +1296,7 @@ def resid(self) -> Float64Array | pd.Series: return self._resid @cached_property - def std_resid(self) -> Float64Array | pd.Series: + def std_resid(self) -> Union[Float64Array, pd.Series]: """ Residuals standardized by conditional volatility """ @@ -1304,7 +1305,9 @@ def std_resid(self) -> Float64Array | pd.Series: std_res.name = "std_resid" return std_res - def plot(self, annualize: str | None = None, scale: float | None = None) -> Figure: + def plot( + self, annualize: Optional[str] = None, scale: Optional[float] = None + ) -> "Figure": """ Plot standardized residuals and conditional volatility @@ -1381,18 +1384,18 @@ def _set_tight_x(axis: Axes, index: pd.Index) -> None: def forecast( self, - params: ArrayLike1D | None = None, + params: Optional[ArrayLike1D] = None, horizon: int = 1, - start: int | DateLike | None = None, + start: Union[int, DateLike, None] = None, align: Literal["origin", "target"] = "origin", method: ForecastingMethod = "analytic", simulations: int = 1000, - rng: Callable[[int | tuple[int, ...]], Float64Array] | None = None, - random_state: np.random.RandomState | None = None, + rng: Optional[Callable[[Union[int, tuple[int, ...]]], Float64Array]] = None, + random_state: Optional[np.random.RandomState] = None, *, reindex: bool = False, - x: None | dict[Label, ArrayLike] | ArrayLike = None, - ) -> ARCHModelForecast: + x: Union[dict[Label, ArrayLike], ArrayLike, None] = None, + ) -> "ARCHModelForecast": """ Construct forecasts from estimated model @@ -1519,14 +1522,14 @@ def forecast( @deprecate_kwarg("type", "plot_type") def hedgehog_plot( self, - params: ArrayLike1D | None = None, + params: Optional[ArrayLike1D] = None, horizon: int = 10, step: int = 10, - start: int | DateLike | None = None, + start: Union[int, DateLike, None] = None, plot_type: Literal["volatility", "mean"] = "volatility", method: ForecastingMethod = "analytic", simulations: int = 1000, - ) -> Figure: + ) -> "Figure": """ Plot forecasts from estimated model @@ -1636,7 +1639,7 @@ def hedgehog_plot( return fig def arch_lm_test( - self, lags: int | None = None, standardized: bool = False + self, lags: Optional[int] = None, standardized: bool = False ) -> WaldTestStatistic: """ ARCH LM test for conditional heteroskedasticity @@ -1726,7 +1729,7 @@ class ARCHModelResult(ARCHModelFixedResult): def __init__( self, params: Float64Array, - param_cov: Float64Array | None, + param_cov: Optional[Float64Array], r2: float, resid: Float64Array, volatility: Float64Array, @@ -1879,7 +1882,7 @@ def summary(self) -> Summary: row = [] for i, table_val in enumerate(table_vals): val = table_val[pos] - if isinstance(val, (np.float64, float)): + if isinstance(val, (np.double, float)): converted = 
format_float_fixed(val, *formats[i]) else: converted = val @@ -2015,7 +2018,7 @@ def _align_forecast( def _format_forecasts( - values: Float64Array, index: list[Label] | pd.Index, start_index: int + values: Float64Array, index: Union[list[Label], pd.Index], start_index: int ) -> pd.DataFrame: horizon = values.shape[1] format_str = "{0:>0" + str(int(np.ceil(np.log10(horizon + 0.5)))) + "}" @@ -2041,11 +2044,11 @@ class ARCHModelForecastSimulation: def __init__( self, - index: list[Label] | pd.Index, - values: Float64Array | None, - residuals: Float64Array | None, - variances: Float64Array | None, - residual_variances: Float64Array | None, + index: Union[list[Label], pd.Index], + values: Optional[Float64Array], + residuals: Optional[Float64Array], + variances: Optional[Float64Array], + residual_variances: Optional[Float64Array], ) -> None: self._index = pd.Index(index) self._values = values @@ -2059,29 +2062,29 @@ def index(self) -> pd.Index: return self._index @property - def values(self) -> Float64Array | None: + def values(self) -> Optional[Float64Array]: """The values of the process""" return self._values @property - def residuals(self) -> Float64Array | None: + def residuals(self) -> Optional[Float64Array]: """Simulated residuals used to produce the values""" return self._residuals @property - def variances(self) -> Float64Array | None: + def variances(self) -> Optional[Float64Array]: """Simulated variances of the values""" return self._variances @property - def residual_variances(self) -> Float64Array | None: + def residual_variances(self) -> Optional[Float64Array]: """Simulated variance of the residuals""" return self._residual_variances def _reindex( - a: Float64Array | None, idx: list[Label] | pd.Index -) -> Float64Array | None: + a: Optional[Float64Array], idx: Union[list[Label], pd.Index] +) -> Optional[Float64Array]: if a is None: return a assert a is not None @@ -2112,15 +2115,15 @@ class ARCHModelForecast: def __init__( self, - index: list[Label] | pd.Index, + index: Union[list[Label], pd.Index], start_index: int, mean: Float64Array, variance: Float64Array, residual_variance: Float64Array, - simulated_paths: Float64Array | None = None, - simulated_variances: Float64Array | None = None, - simulated_residual_variances: Float64Array | None = None, - simulated_residuals: Float64Array | None = None, + simulated_paths: Optional[Float64Array] = None, + simulated_variances: Optional[Float64Array] = None, + simulated_residual_variances: Optional[Float64Array] = None, + simulated_residuals: Optional[Float64Array] = None, align: Literal["origin", "target"] = "origin", *, reindex: bool = False, @@ -2169,7 +2172,7 @@ def residual_variance(self) -> pd.DataFrame: return self._residual_variance @property - def simulations(self) -> ARCHModelForecastSimulation: + def simulations(self) -> "ARCHModelForecastSimulation": """ Detailed simulation results if using a simulation-based method diff --git a/arch/univariate/distribution.py b/arch/univariate/distribution.py index 22c62e13f8..2af5f39d6c 100644 --- a/arch/univariate/distribution.py +++ b/arch/univariate/distribution.py @@ -2,11 +2,9 @@ Distributions to use in ARCH models. All distributions must inherit from :class:`Distribution` and provide the same methods with the same inputs. 
""" -from __future__ import annotations - from abc import ABCMeta, abstractmethod from collections.abc import Sequence -from typing import Callable +from typing import Callable, Optional, Union import warnings from numpy import ( @@ -44,13 +42,13 @@ class Distribution(metaclass=ABCMeta): def __init__( self, - random_state: RandomState | None = None, + random_state: Optional[RandomState] = None, *, - seed: None | int | RandomState | Generator = None, + seed: Union[int, RandomState, Generator, None] = None, ) -> None: self._name = "Distribution" self.num_params: int = 0 - self._parameters: Float64Array | None = None + self._parameters: Optional[Float64Array] = None if random_state is not None: if seed is not None: raise ValueError( @@ -66,11 +64,11 @@ def __init__( "Use seed instead.", FutureWarning, ) - _seed: None | RandomState | Generator | int = random_state + _seed: Union[int, RandomState, Generator, None] = random_state else: _seed = seed if _seed is None: - self._generator: Generator | RandomState = default_rng() + self._generator: Union[Generator, RandomState] = default_rng() elif isinstance(_seed, (int, integer)): self._generator = default_rng(_seed) elif isinstance(_seed, (RandomState, Generator)): @@ -86,7 +84,7 @@ def name(self) -> str: return self._name def _check_constraints( - self, parameters: None | Sequence[float] | ArrayLike1D + self, parameters: Union[Sequence[float], ArrayLike1D, None] ) -> Float64Array: bounds = self.bounds(empty(0)) if parameters is not None: @@ -108,12 +106,12 @@ def _check_constraints( return asarray(params) @property - def generator(self) -> RandomState | Generator: + def generator(self) -> Union[RandomState, Generator]: """The NumPy Generator or RandomState attached to the distribution""" return self._generator @property - def random_state(self) -> RandomState | Generator: + def random_state(self) -> Union[RandomState, Generator]: """ The NumPy RandomState attached to the distribution @@ -129,7 +127,7 @@ def random_state(self) -> RandomState | Generator: return self._generator @abstractmethod - def _simulator(self, size: int | tuple[int, ...]) -> Float64Array: + def _simulator(self, size: Union[int, tuple[int, ...]]) -> Float64Array: """ Simulate i.i.d. draws from the distribution @@ -150,8 +148,8 @@ def _simulator(self, size: int | tuple[int, ...]) -> Float64Array: @abstractmethod def simulate( - self, parameters: int | float | Sequence[float | int] | ArrayLike1D - ) -> Callable[[int | tuple[int, ...]], Float64Array]: + self, parameters: Union[int, float, Sequence[Union[float, int]], ArrayLike1D] + ) -> Callable[[Union[int, tuple[int, ...]]], Float64Array]: """ Simulates i.i.d. draws from the distribution @@ -203,11 +201,11 @@ def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: @abstractmethod def loglikelihood( self, - parameters: Sequence[float] | ArrayLike1D, + parameters: Union[Sequence[float], ArrayLike1D], resids: ArrayLike, sigma2: ArrayLike, individual: bool = False, - ) -> float | Float64Array: + ) -> Union[float , Float64Array]: """ Loglikelihood evaluation. 
@@ -264,9 +262,9 @@ def parameter_names(self) -> list[str]: @abstractmethod def ppf( self, - pits: float | Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, - ) -> float | Float64Array: + pits: Union[float, Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, + ) -> Union[float , Float64Array]: """ Inverse cumulative density function (ICDF) @@ -287,8 +285,8 @@ def ppf( @abstractmethod def cdf( self, - resids: Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, + resids: Union[Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> Float64Array: """ Cumulative distribution function @@ -309,7 +307,7 @@ def cdf( @abstractmethod def moment( - self, n: int, parameters: None | Sequence[float] | ArrayLike1D = None + self, n: int, parameters: Union[Sequence[float], ArrayLike1D, None] = None ) -> float: """ Moment of order n @@ -332,7 +330,7 @@ def partial_moment( self, n: int, z: float = 0.0, - parameters: None | Sequence[float] | ArrayLike1D = None, + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> float: r""" Order n lower partial moment from -inf to z @@ -397,9 +395,9 @@ class Normal(Distribution, metaclass=AbstractDocStringInheritor): def __init__( self, - random_state: RandomState | None = None, + random_state: Optional[RandomState] = None, *, - seed: None | int | RandomState | Generator = None, + seed: Union[None, int, RandomState, Generator] = None, ) -> None: super().__init__(random_state=random_state, seed=seed) self._name = "Normal" @@ -412,11 +410,11 @@ def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: def loglikelihood( self, - parameters: Sequence[float] | ArrayLike1D, + parameters: Union[Sequence[float], ArrayLike1D], resids: ArrayLike, sigma2: ArrayLike, individual: bool = False, - ) -> float | Float64Array: + ) -> Union[float , Float64Array]: r"""Computes the log-likelihood of assuming residuals are normally distributed, conditional on the variance @@ -457,12 +455,12 @@ def loglikelihood( def starting_values(self, std_resid: Float64Array) -> Float64Array: return empty(0) - def _simulator(self, size: int | tuple[int, ...]) -> Float64Array: + def _simulator(self, size: Union[int, tuple[int, ...]]) -> Float64Array: return self._generator.standard_normal(size) def simulate( - self, parameters: int | float | Sequence[float | int] | ArrayLike1D - ) -> Callable[[int | tuple[int, ...]], Float64Array]: + self, parameters: Union[int, float, Sequence[Union[float, int]], ArrayLike1D] + ) -> Callable[[Union[int, tuple[int, ...]]], Float64Array]: return self._simulator def parameter_names(self) -> list[str]: @@ -470,16 +468,16 @@ def parameter_names(self) -> list[str]: def cdf( self, - resids: Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, + resids: Union[Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> Float64Array: self._check_constraints(parameters) return stats.norm.cdf(asarray(resids)) def ppf( self, - pits: float | Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, + pits: Union[float, Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> Float64Array: self._check_constraints(parameters) scalar = isscalar(pits) @@ -494,7 +492,7 @@ def ppf( return ppf def moment( - self, n: int, parameters: None | Sequence[float] | 
ArrayLike1D = None + self, n: int, parameters: Union[Sequence[float], ArrayLike1D, None] = None ) -> float: if n < 0: return nan @@ -505,7 +503,7 @@ def partial_moment( self, n: int, z: float = 0.0, - parameters: None | Sequence[float] | ArrayLike1D = None, + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> float: if n < 0: return nan @@ -539,9 +537,9 @@ class StudentsT(Distribution, metaclass=AbstractDocStringInheritor): def __init__( self, - random_state: RandomState | None = None, + random_state: Optional[RandomState] = None, *, - seed: None | int | RandomState | Generator = None, + seed: Union[None, int, RandomState, Generator] = None, ) -> None: super().__init__(random_state=random_state, seed=seed) self._name = "Standardized Student's t" @@ -555,11 +553,11 @@ def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: def loglikelihood( self, - parameters: Sequence[float] | ArrayLike1D, + parameters: Union[Sequence[float], ArrayLike1D], resids: ArrayLike, sigma2: ArrayLike, individual: bool = False, - ) -> float | Float64Array: + ) -> Union[float , Float64Array]: r"""Computes the log-likelihood of assuming residuals are have a standardized (to have unit variance) Student's t distribution, conditional on the variance. @@ -628,15 +626,15 @@ def starting_values(self, std_resid: Float64Array) -> Float64Array: sv = max((4.0 * k - 6.0) / (k - 3.0) if k > 3.75 else 12.0, 4.0) return array([sv]) - def _simulator(self, size: int | tuple[int, ...]) -> Float64Array: + def _simulator(self, size: Union[int, tuple[int, ...]]) -> Float64Array: assert self._parameters is not None parameters = self._parameters std_dev = sqrt(parameters[0] / (parameters[0] - 2)) return self._generator.standard_t(self._parameters[0], size=size) / std_dev def simulate( - self, parameters: int | float | Sequence[float | int] | ArrayLike1D - ) -> Callable[[int | tuple[int, ...]], Float64Array]: + self, parameters: Union[int, float, Sequence[Union[float, int]], ArrayLike1D] + ) -> Callable[[Union[int, tuple[int, ...]]], Float64Array]: parameters = ensure1d(parameters, "parameters", False) if parameters[0] <= 2.0: raise ValueError("The shape parameter must be larger than 2") @@ -648,8 +646,8 @@ def parameter_names(self) -> list[str]: def cdf( self, - resids: Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, + resids: Union[Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> Float64Array: parameters = self._check_constraints(parameters) nu = parameters[0] @@ -658,8 +656,8 @@ def cdf( def ppf( self, - pits: float | Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, + pits: Union[float, Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> Float64Array: parameters = self._check_constraints(parameters) pits = asarray(pits, dtype=float) @@ -668,7 +666,7 @@ def ppf( return stats.t(nu, scale=1.0 / sqrt(var)).ppf(pits) def moment( - self, n: int, parameters: None | Sequence[float] | ArrayLike1D = None + self, n: int, parameters: Union[Sequence[float], ArrayLike1D, None] = None ) -> float: if n < 0: return nan @@ -681,7 +679,7 @@ def partial_moment( self, n: int, z: float = 0.0, - parameters: None | Sequence[float] | ArrayLike1D = None, + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> float: parameters = self._check_constraints(parameters) nu = parameters[0] @@ -774,9 +772,9 @@ class SkewStudent(Distribution, 
metaclass=AbstractDocStringInheritor): def __init__( self, - random_state: RandomState | None = None, + random_state: Optional[RandomState] = None, *, - seed: None | int | RandomState | Generator = None, + seed: Union[None, int, RandomState, Generator] = None, ) -> None: super().__init__(random_state=random_state, seed=seed) self._name = "Standardized Skew Student's t" @@ -790,7 +788,7 @@ def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: def loglikelihood( self, - parameters: Sequence[float] | ArrayLike1D, + parameters: Union[Sequence[float], ArrayLike1D], resids: ArrayLike, sigma2: ArrayLike, individual: bool = False, @@ -886,7 +884,7 @@ def starting_values(self, std_resid: Float64Array) -> Float64Array: sv = max((4.0 * k - 6.0) / (k - 3.0) if k > 3.75 else 12.0, 4.0) return array([sv, 0.0]) - def _simulator(self, size: int | tuple[int, ...]) -> Float64Array: + def _simulator(self, size: Union[int, tuple[int, ...]]) -> Float64Array: # No need to normalize since it is already done in parameterization assert self._parameters is not None if isinstance(self._generator, Generator): @@ -898,8 +896,8 @@ def _simulator(self, size: int | tuple[int, ...]) -> Float64Array: return ppf def simulate( - self, parameters: int | float | Sequence[float | int] | ArrayLike1D - ) -> Callable[[int | tuple[int, ...]], Float64Array]: + self, parameters: Union[int, float, Sequence[Union[float, int]], ArrayLike1D] + ) -> Callable[[Union[int, tuple[int, ...]]], Float64Array]: parameters = ensure1d(parameters, "parameters", False) if parameters[0] <= 2.0: raise ValueError("The shape parameter must be larger than 2") @@ -913,7 +911,7 @@ def simulate( def parameter_names(self) -> list[str]: return ["eta", "lambda"] - def __const_a(self, parameters: Float64Array | Sequence[float]) -> float: + def __const_a(self, parameters: Union[Float64Array , Sequence[float]]) -> float: """ Compute a constant. @@ -932,7 +930,7 @@ def __const_a(self, parameters: Float64Array | Sequence[float]) -> float: c = self.__const_c(parameters) return float(4 * lam * exp(c) * (eta - 2) / (eta - 1)) - def __const_b(self, parameters: Float64Array | Sequence[float]) -> float: + def __const_b(self, parameters: Union[Float64Array , Sequence[float]]) -> float: """ Compute b constant. @@ -951,7 +949,7 @@ def __const_b(self, parameters: Float64Array | Sequence[float]) -> float: return (1 + 3 * lam ** 2 - a ** 2) ** 0.5 @staticmethod - def __const_c(parameters: Float64Array | Sequence[float]) -> float: + def __const_c(parameters: Union[Float64Array, Sequence[float]]) -> float: """ Compute c constant. 
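
[Editor's note] An illustrative check (parameter values hypothetical) of the SkewStudent interface whose helper constants are documented above; parameters are ordered [eta, lambda], the tail and skew parameters:

from arch.univariate import SkewStudent

dist = SkewStudent(seed=0)
params = [8.0, -0.2]
print(dist.moment(2, params))               # approximately 1.0: the distribution is standardized
print(dist.ppf([0.05, 0.5, 0.95], params))  # 5%, 50% and 95% quantiles
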
@@ -973,8 +971,8 @@ def __const_c(parameters: Float64Array | Sequence[float]) -> float: def cdf( self, - resids: Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, + resids: Union[Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> Float64Array: parameters = self._check_constraints(parameters) scalar = isscalar(resids) @@ -999,9 +997,9 @@ def cdf( def ppf( self, - pits: float | Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, - ) -> float | Float64Array: + pits: Union[float, Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, + ) -> Union[float , Float64Array]: parameters = self._check_constraints(parameters) scalar = isscalar(pits) if scalar: @@ -1029,7 +1027,7 @@ def ppf( return icdf def moment( - self, n: int, parameters: None | Sequence[float] | ArrayLike1D = None + self, n: int, parameters: Union[Sequence[float], ArrayLike1D, None] = None ) -> float: parameters = self._check_constraints(parameters) eta, lam = parameters @@ -1064,7 +1062,7 @@ def partial_moment( self, n: int, z: float = 0.0, - parameters: None | Sequence[float] | ArrayLike1D = None, + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> float: parameters = self._check_constraints(parameters) eta, lam = parameters @@ -1127,9 +1125,9 @@ class GeneralizedError(Distribution, metaclass=AbstractDocStringInheritor): def __init__( self, - random_state: RandomState | None = None, + random_state: Optional[RandomState] = None, *, - seed: None | int | RandomState | Generator = None, + seed: Union[None, int, RandomState, Generator] = None, ) -> None: super().__init__(random_state=random_state, seed=seed) self._name = "Generalized Error Distribution" @@ -1143,7 +1141,7 @@ def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: def loglikelihood( self, - parameters: Sequence[float] | ArrayLike1D, + parameters: Union[Sequence[float], ArrayLike1D], resids: ArrayLike, sigma2: ArrayLike, individual: bool = False, @@ -1219,7 +1217,7 @@ def starting_values(self, std_resid: Float64Array) -> Float64Array: """ return array([1.5]) - def _simulator(self, size: int | tuple[int, ...]) -> Float64Array: + def _simulator(self, size: Union[int, tuple[int, ...]]) -> Float64Array: assert self._parameters is not None parameters = self._parameters nu = parameters[0] @@ -1234,8 +1232,8 @@ def _simulator(self, size: int | tuple[int, ...]) -> Float64Array: return randoms / scale def simulate( - self, parameters: int | float | Sequence[float | int] | ArrayLike1D - ) -> Callable[[int | tuple[int, ...]], Float64Array]: + self, parameters: Union[int, float, Sequence[Union[float, int]], ArrayLike1D] + ) -> Callable[[Union[int, tuple[int, ...]]], Float64Array]: parameters = ensure1d(parameters, "parameters", False) if parameters[0] <= 1.0: raise ValueError("The shape parameter must be larger than 1") @@ -1247,8 +1245,8 @@ def parameter_names(self) -> list[str]: def ppf( self, - pits: float | Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, + pits: Union[float, Sequence[float], ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> Float64Array: parameters = self._check_constraints(parameters) pits = asarray(pits, dtype=float) @@ -1258,8 +1256,8 @@ def ppf( def cdf( self, - resids: Sequence[float] | ArrayLike1D, - parameters: None | Sequence[float] | ArrayLike1D = None, + resids: Union[Sequence[float], 
ArrayLike1D], + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> Float64Array: parameters = self._check_constraints(parameters) nu = parameters[0] @@ -1268,7 +1266,7 @@ def cdf( return stats.gennorm(nu, scale=1.0 / sqrt(var)).cdf(resids) def moment( - self, n: int, parameters: None | Sequence[float] | ArrayLike1D = None + self, n: int, parameters: Union[Sequence[float], ArrayLike1D, None] = None ) -> float: if n < 0: return nan @@ -1282,7 +1280,7 @@ def partial_moment( self, n: int, z: float = 0.0, - parameters: None | Sequence[float] | ArrayLike1D = None, + parameters: Union[Sequence[float], ArrayLike1D, None] = None, ) -> float: parameters = self._check_constraints(parameters) nu = parameters[0] diff --git a/arch/univariate/mean.py b/arch/univariate/mean.py index a6f2f9f3ea..2b8b68eb3a 100644 --- a/arch/univariate/mean.py +++ b/arch/univariate/mean.py @@ -3,11 +3,9 @@ :class:`ARCHModel` and provide the same methods with the same inputs. """ -from __future__ import annotations - from collections.abc import Mapping, Sequence import copy -from typing import TYPE_CHECKING, Callable, cast +from typing import TYPE_CHECKING, Callable, Optional, Union, cast import numpy as np import pandas as pd @@ -247,22 +245,17 @@ class HARX(ARCHModel, metaclass=AbstractDocStringInheritor): def __init__( self, - y: ArrayLike | None = None, - x: ArrayLike2D | None = None, - lags: ( - None - | int - | Sequence[int] - | Sequence[Sequence[int]] - | Int32Array - | Int64Array - ) = None, + y: Optional[ArrayLike] = None, + x: Optional[ArrayLike2D] = None, + lags: Union[ + int, Sequence[int], Sequence[Sequence[int]], Int32Array, Int64Array, None + ] = None, constant: bool = True, use_rotated: bool = False, - hold_back: int | None = None, - volatility: VolatilityProcess | None = None, - distribution: Distribution | None = None, - rescale: bool | None = None, + hold_back: Optional[int] = None, + volatility: Optional[VolatilityProcess] = None, + distribution: Optional[Distribution] = None, + rescale: Optional[bool] = None, ) -> None: super().__init__( y, @@ -273,19 +266,14 @@ def __init__( ) self._x = x self._x_names: list[str] = [] - self._x_index: None | NDArray | pd.Index = None - self.lags: ( - None - | int - | Sequence[int] - | Sequence[Sequence[int]] - | Int32Array - | Int64Array - ) = lags + self._x_index: Union[NDArray, pd.Index, None] = None + self.lags: Union[ + int, Sequence[int], Sequence[Sequence[int]], Int32Array, Int64Array, None + ] = lags self._lags = np.empty((0, 0)) self.constant: bool = constant self.use_rotated: bool = use_rotated - self.regressors: Float64Array = np.empty((0, 0), dtype=np.float64) + self.regressors: Float64Array = np.empty((0, 0), dtype=np.double) self._name = "HAR" if self._x is not None: @@ -319,7 +307,7 @@ def _scale_changed(self) -> None: self._init_model() @property - def x(self) -> ArrayLike2D | None: + def x(self) -> Optional[ArrayLike2D]: """Gets the value of the exogenous regressors in the model""" return self._x @@ -372,8 +360,8 @@ def _repr_html_(self) -> str: def resids( self, params: Float64Array, - y: ArrayLike1D | None = None, - regressors: ArrayLike2D | None = None, + y: Optional[ArrayLike1D] = None, + regressors: Optional[ArrayLike2D] = None, ) -> ArrayLike1D: regressors = self._fit_regressors if y is None else regressors y = self._fit_y if y is None else y @@ -393,7 +381,7 @@ def _simulate_mean( parameters: Float64Array, x: Float64Array, errors: Float64Array, - initial_value: None | float | Float64Array, + initial_value: Union[float, 
Float64Array, None], conditional_variance: Float64Array, ) -> Float64Array: max_lag = 0 if not self._lags.size else int(np.max(self._lags)) @@ -429,12 +417,12 @@ def _simulate_mean( def simulate( self, - params: ArrayLike1D | Sequence[float], + params: Union[ArrayLike1D, Sequence[float]], nobs: int, burn: int = 500, - initial_value: None | float | Float64Array = None, - x: ArrayLike | None = None, - initial_value_vol: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, + x: Optional[ArrayLike] = None, + initial_value_vol: Union[float, Float64Array, None] = None, ) -> pd.DataFrame: """ Simulates data from a linear regression, AR or HAR models @@ -642,23 +630,23 @@ def _init_model(self) -> None: nobs_orig = self._y.shape[0] if self.constant: - reg_constant = np.ones((nobs_orig, 1), dtype=np.float64) + reg_constant = np.ones((nobs_orig, 1), dtype=np.double) else: - reg_constant = np.ones((nobs_orig, 0), dtype=np.float64) + reg_constant = np.ones((nobs_orig, 0), dtype=np.double) if self.lags is not None and nobs_orig > 0: maxlag = np.max(self.lags) lag_array = lagmat(self._y, maxlag) - reg_lags = np.empty((nobs_orig, self._lags.shape[1]), dtype=np.float64) + reg_lags = np.empty((nobs_orig, self._lags.shape[1]), dtype=np.double) for i, lags in enumerate(self._lags.T): reg_lags[:, i] = np.mean(lag_array[:, lags[0] : lags[1]], 1) else: - reg_lags = np.empty((nobs_orig, 0), dtype=np.float64) + reg_lags = np.empty((nobs_orig, 0), dtype=np.double) if self._x is not None: reg_x = self._x else: - reg_x = np.empty((nobs_orig, 0), dtype=np.float64) + reg_x = np.empty((nobs_orig, 0), dtype=np.double) self.regressors = np.hstack((reg_constant, reg_lags, reg_x)) @@ -682,8 +670,8 @@ def _r2(self, params: ArrayLike1D) -> float: def _adjust_sample( self, - first_obs: None | int | DateLike, - last_obs: None | int | DateLike, + first_obs: Union[int, DateLike, None], + last_obs: Union[int, DateLike, None], ) -> None: index = self._y_series.index _first_obs_index = cutoff_to_index(first_obs, index, 0) @@ -797,7 +785,7 @@ def _fit_no_arch_normal_errors( r2 = self._r2(regression_params) first_obs, last_obs = self._fit_indices - resids = np.empty_like(self._y, dtype=np.float64) + resids = np.empty_like(self._y, dtype=np.double) resids.fill(np.nan) resids[first_obs:last_obs] = e vol = np.zeros_like(resids) @@ -831,7 +819,7 @@ def _fit_no_arch_normal_errors( def _reformat_forecast_x( self, - x: None | dict[Label, ArrayLike] | ArrayLike, + x: Union[dict[Label, ArrayLike], ArrayLike, None], horizon: int, start: int, ) -> Float64Array: @@ -880,7 +868,7 @@ def _reformat_forecast_x( f"the included exogenous regressors. 
{key} not found in: " f"{keys}" ) - temp = np.asarray(x[key], dtype=np.float64) + temp = np.asarray(x[key], dtype=np.double) if temp.ndim == 1: temp = temp.reshape((1, -1)) collected.append(temp) @@ -938,15 +926,15 @@ def forecast( self, params: ArrayLike1D, horizon: int = 1, - start: None | int | DateLike = None, + start: Union[int, DateLike, None] = None, align: Literal["origin", "target"] = "origin", method: ForecastingMethod = "analytic", simulations: int = 1000, - rng: Callable[[int | tuple[int, ...]], Float64Array] | None = None, - random_state: np.random.RandomState | None = None, + rng: Optional[Callable[[Union[int, tuple[int, ...]]], Float64Array]] = None, + random_state: Optional[np.random.RandomState] = None, *, reindex: bool = False, - x: None | dict[Label, ArrayLike] | ArrayLike = None, + x: Union[dict[Label, ArrayLike], ArrayLike, None] = None, ) -> ARCHModelForecast: if not isinstance(horizon, (int, np.integer)) or horizon < 1: raise ValueError("horizon must be an integer >= 1.") @@ -1011,10 +999,10 @@ def forecast( for i in range(horizon): lrf = var_fcasts[:, : (i + 1)].dot(impulse[i::-1] ** 2) longrun_var_fcasts[:, i] = lrf - variance_paths: Float64Array | None = None - mean_paths: Float64Array | None = None - shocks: Float64Array | None = None - long_run_variance_paths: Float64Array | None = None + variance_paths: Optional[Float64Array] = None + mean_paths: Optional[Float64Array] = None + shocks: Optional[Float64Array] = None + long_run_variance_paths: Optional[Float64Array] = None if method.lower() in ("simulation", "bootstrap"): # TODO: This is not tested, but probably right assert isinstance(vfcast.forecast_paths, np.ndarray) @@ -1109,11 +1097,11 @@ class ConstantMean(HARX): def __init__( self, - y: ArrayLike | None = None, - hold_back: int | None = None, - volatility: VolatilityProcess | None = None, - distribution: Distribution | None = None, - rescale: bool | None = None, + y: Optional[ArrayLike] = None, + hold_back: Optional[int] = None, + volatility: Optional[VolatilityProcess] = None, + distribution: Optional[Distribution] = None, + rescale: Optional[bool] = None, ) -> None: super().__init__( y, @@ -1136,12 +1124,12 @@ def _model_description(self, include_lags: bool = False) -> dict[str, str]: def simulate( self, - params: ArrayLike1D | Sequence[float], + params: Union[ArrayLike1D, Sequence[float]], nobs: int, burn: int = 500, - initial_value: None | float | Float64Array = None, - x: ArrayLike | None = None, - initial_value_vol: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, + x: Optional[ArrayLike] = None, + initial_value_vol: Union[float, Float64Array, None] = None, ) -> pd.DataFrame: """ Simulated data from a constant mean model @@ -1206,10 +1194,10 @@ def simulate( def resids( self, params: Float64Array, - y: ArrayLike1D | None = None, - regressors: ArrayLike2D | None = None, + y: Optional[ArrayLike1D] = None, + regressors: Optional[ArrayLike2D] = None, ) -> ArrayLike1D: - y = self._fit_y if y is None else np.asarray(y, dtype=np.float64) + y = self._fit_y if y is None else np.asarray(y, dtype=np.double) return y - params @@ -1255,11 +1243,11 @@ class ZeroMean(HARX): def __init__( self, - y: ArrayLike | None = None, - hold_back: int | None = None, - volatility: VolatilityProcess | None = None, - distribution: Distribution | None = None, - rescale: bool | None = None, + y: Optional[ArrayLike] = None, + hold_back: Optional[int] = None, + volatility: Optional[VolatilityProcess] = None, + distribution: 
Optional[Distribution] = None, + rescale: Optional[bool] = None, ) -> None: super().__init__( y, @@ -1284,12 +1272,12 @@ def _model_description(self, include_lags: bool = False) -> dict[str, str]: def simulate( self, - params: ArrayLike1D | Sequence[float], + params: Union[ArrayLike1D, Sequence[float]], nobs: int, burn: int = 500, - initial_value: None | float | Float64Array = None, - x: ArrayLike | None = None, - initial_value_vol: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, + x: Optional[ArrayLike] = None, + initial_value_vol: Union[float, Float64Array, None] = None, ) -> pd.DataFrame: """ Simulated data from a zero mean model @@ -1357,8 +1345,8 @@ def simulate( def resids( self, params: Float64Array, - y: ArrayLike1D | None = None, - regressors: ArrayLike2D | None = None, + y: Optional[ArrayLike1D] = None, + regressors: Optional[ArrayLike2D] = None, ) -> ArrayLike1D: if y is not None: return y @@ -1424,14 +1412,14 @@ class ARX(HARX): def __init__( self, - y: ArrayLike | None = None, - x: ArrayLike2D | None = None, - lags: None | int | list[int] | Int32Array | Int64Array = None, + y: Optional[ArrayLike] = None, + x: Optional[ArrayLike2D] = None, + lags: Union[int, list[int], Int32Array, Int64Array, None] = None, constant: bool = True, - hold_back: int | None = None, - volatility: VolatilityProcess | None = None, - distribution: Distribution | None = None, - rescale: bool | None = None, + hold_back: Optional[int] = None, + volatility: Optional[VolatilityProcess] = None, + distribution: Optional[Distribution] = None, + rescale: Optional[bool] = None, ) -> None: # Convert lags to 2-d format @@ -1505,7 +1493,7 @@ class LS(HARX): ---------- y : {ndarray, Series} nobs element vector containing the dependent variable - y : {ndarray, DataFrame}, optional + x : {ndarray, DataFrame}, optional nobs by k element array containing exogenous regressors constant : bool, optional Flag whether the model should include a constant @@ -1544,13 +1532,13 @@ class LS(HARX): def __init__( self, - y: ArrayLike | None = None, - x: ArrayLike2D | None = None, + y: Optional[ArrayLike] = None, + x: Optional[ArrayLike2D] = None, constant: bool = True, - hold_back: int | None = None, - volatility: VolatilityProcess | None = None, - distribution: Distribution | None = None, - rescale: bool | None = None, + hold_back: Optional[int] = None, + volatility: Optional[VolatilityProcess] = None, + distribution: Optional[Distribution] = None, + rescale: Optional[bool] = None, ) -> None: # Convert lags to 2-d format super().__init__( @@ -1632,15 +1620,15 @@ class ARCHInMean(ARX): def __init__( self, - y: ArrayLike | None = None, - x: ArrayLike2D | None = None, - lags: None | int | list[int] | Int32Array | Int64Array = None, + y: Optional[ArrayLike] = None, + x: Optional[ArrayLike2D] = None, + lags: Union[int, list[int], Int32Array, Int64Array, None] = None, constant: bool = True, - hold_back: int | None = None, - volatility: VolatilityProcess | None = None, - distribution: Distribution | None = None, - rescale: bool | None = None, - form: int | float | Literal["log", "vol", "var"] = "vol", + hold_back: Optional[int] = None, + volatility: Optional[VolatilityProcess] = None, + distribution: Optional[Distribution] = None, + rescale: Optional[bool] = None, + form: Union[int, float, Literal["log", "vol", "var"]] = "vol", ) -> None: super().__init__( y, x, lags, constant, hold_back, volatility, distribution, rescale @@ -1679,7 +1667,7 @@ def __init__( self._recursion = 
ARCHInMeanRecursion(self._volatility_updater) @property - def form(self) -> int | float | Literal["log", "vol", "var"]: + def form(self) -> Union[int, float, Literal["log", "vol", "var"]]: """The form of the conditional variance in the mean""" return self._form @@ -1709,15 +1697,15 @@ def forecast( self, params: ArrayLike1D, horizon: int = 1, - start: None | int | DateLike = None, + start: Union[int, DateLike, None] = None, align: Literal["origin", "target"] = "origin", method: ForecastingMethod = "analytic", simulations: int = 1000, - rng: Callable[[int | tuple[int, ...]], Float64Array] | None = None, - random_state: np.random.RandomState | None = None, + rng: Optional[Callable[[Union[int, tuple[int, ...]]], Float64Array]] = None, + random_state: Optional[np.random.RandomState] = None, *, - reindex: bool | None = None, - x: None | dict[Label, ArrayLike] | ArrayLike = None, + reindex: Optional[bool] = None, + x: Union[dict[Label, ArrayLike], ArrayLike, None] = None, ) -> ARCHModelForecast: raise NotImplementedError( "forecasts are not implemented for (G)ARCH-in-mean models" @@ -1726,8 +1714,8 @@ def forecast( def resids( self, params: Float64Array, - y: ArrayLike1D | None = None, - regressors: ArrayLike2D | None = None, + y: Optional[ArrayLike1D] = None, + regressors: Optional[ArrayLike2D] = None, ) -> ArrayLike1D: return super().resids(params[:-1], y=y, regressors=regressors) @@ -1738,10 +1726,10 @@ def _loglikelihood( self, parameters: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, individual: bool = False, - ) -> float | Float64Array: + ) -> Union[float, Float64Array]: # Parse parameters _callback_info["count"] += 1 @@ -1776,7 +1764,7 @@ def _simulate_mean( parameters: Float64Array, x: Float64Array, errors: Float64Array, - initial_value: None | float | Float64Array, + initial_value: Union[float, Float64Array, None], conditional_variance: Float64Array, ) -> Float64Array: """ @@ -1831,16 +1819,16 @@ def _simulate_mean( def arch_model( - y: ArrayLike | None, - x: ArrayLike2D | None = None, + y: Optional[ArrayLike], + x: Optional[ArrayLike2D] = None, mean: Literal[ "Constant", "Zero", "LS", "AR", "ARX", "HAR", "HARX", "constant", "zero" ] = "Constant", - lags: None | int | list[int] | Int32Array | Int64Array = 0, + lags: Union[int, list[int], Int32Array, Int64Array, None] = 0, vol: Literal[ "GARCH", "ARCH", "EGARCH", "FIGARCH", "APARCH", "HARCH", "FIGARCH" ] = "GARCH", - p: int | list[int] = 1, + p: Union[int, list[int]] = 1, o: int = 0, q: int = 1, power: float = 2.0, @@ -1854,8 +1842,8 @@ def arch_model( "ged", "generalized error", ] = "normal", - hold_back: int | None = None, - rescale: bool | None = None, + hold_back: Optional[int] = None, + rescale: Optional[bool] = None, ) -> HARX: """ Initialization of common ARCH model specifications diff --git a/arch/univariate/recursions.pyi b/arch/univariate/recursions.pyi index 2772ed4d21..63bdeb5a49 100644 --- a/arch/univariate/recursions.pyi +++ b/arch/univariate/recursions.pyi @@ -1,3 +1,5 @@ +from typing import Optional, Union + from arch.typing import Float64Array, Int32Array def harch_recursion( @@ -104,7 +106,7 @@ def garch_core( class VolatilityUpdater: def initialize_update( - self, parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: ... 
def _update_tester( self, @@ -119,7 +121,7 @@ class GARCHUpdater(VolatilityUpdater): def __init__(self, p: int, o: int, q: int, power: float) -> None: ... class EWMAUpdater(VolatilityUpdater): - def __init__(self, lam: float | None) -> None: ... + def __init__(self, lam: Optional[float]) -> None: ... class FIGARCHUpdater(VolatilityUpdater): def __init__(self, p: int, q: int, power: float, truncation: int) -> None: ... diff --git a/arch/univariate/recursions.pyx b/arch/univariate/recursions.pyx index 56d8f49aed..1fbe0ca33c 100644 --- a/arch/univariate/recursions.pyx +++ b/arch/univariate/recursions.pyx @@ -63,15 +63,15 @@ def harch_core( t: int Location of variance to compute. Assumes variance has been computed at times t-1, t-2, ... - parameters : 1-d array, float64 + parameters : 1-d array, double Model parameters - resids : 1-d array, float64 + resids : 1-d array, double Residuals to use in the recursion - sigma2 : 1-d array, float64 + sigma2 : 1-d array, double Conditional variances with same shape as resids lags : 1-d array, int Lag lengths in the HARCH - backcast : float64 + backcast : double Value to use when initializing the recursion var_bounds : 2-d array nobs by 2-element array of upper and lower bounds for conditional @@ -109,17 +109,17 @@ def harch_recursion(const double[::1] parameters, """ Parameters ---------- - parameters : 1-d array, float64 + parameters : 1-d array, double Model parameters - resids : 1-d array, float64 + resids : 1-d array, double Residuals to use in the recursion - sigma2 : 1-d array, float64 + sigma2 : 1-d array, double Conditional variances with same shape as resids lags : 1-d array, int Lag lengths in the HARCH nobs : int Length of resids - backcast : float64 + backcast : double Value to use when initializing the recursion var_bounds : 2-d array nobs by 2-element array of upper and lower bounds for conditional @@ -154,17 +154,17 @@ def arch_recursion(const double[::1] parameters, """ Parameters ---------- - parameters : 1-d array, float64 + parameters : 1-d array, double Model parameters - resids : 1-d array, float64 + resids : 1-d array, double Residuals to use in the recursion - sigma2 : 1-d array, float64 + sigma2 : 1-d array, double Conditional variances with same shape as resids p : int Number of lags in ARCH model nobs : int Length of resids - backcast : float64 + backcast : double Value to use when initializing the recursion var_bounds : 2-d array nobs by 2-element array of upper and lower bounds for conditional @@ -274,14 +274,14 @@ def garch_recursion(const double[::1] parameters, Parameters ---------- - parameters : 1-d array, float64 + parameters : 1-d array, double Model parameters - fresids : 1-d array, float64 + fresids : 1-d array, double Absolute value of residuals raised to the power in the model. For example, in a standard GARCH model, the power is 2.0. 
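# Illustrative sketch driving the pure-Python twin of the garch_recursion kernel
# documented above; fresids = abs(resids) ** power and sresids holds the residual
# signs, as the parameter descriptions state. All values below are made up.
import numpy as np

from arch.univariate.recursions_python import garch_recursion

nobs = 250
resids = np.random.default_rng(0).standard_normal(nobs)
fresids = resids**2.0  # power = 2.0, a standard GARCH
sresids = np.sign(resids)
sigma2 = np.empty(nobs)
var_bounds = np.column_stack([np.full(nobs, 1e-8), np.full(nobs, 1e8)])
parameters = np.array([0.02, 0.05, 0.90])  # omega, alpha, beta for p=1, o=0, q=1

garch_recursion(
    parameters, fresids, sresids, sigma2, 1, 0, 1, nobs, float(resids.var()), var_bounds
)
print(sigma2[:3])  # conditional variances are filled in place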
- sresids : 1-d array, float64 + sresids : 1-d array, double Variable containing the sign of the residuals (-1.0, 0.0, 1.0) - sigma2 : 1-d array, float64 + sigma2 : 1-d array, double Conditional variances with same shape as resids p : int Number of symmetric innovations in model @@ -291,7 +291,7 @@ def garch_recursion(const double[::1] parameters, Number of lags of the (transformed) variance in the model nobs : int Length of resids - backcast : float64 + backcast : double Value to use when initializing the recursion var_bounds : 2-d array nobs by 2-element array of upper and lower bounds for conditional @@ -345,11 +345,11 @@ def egarch_recursion(const double[::1] parameters, Parameters ---------- - parameters : 1-d array, float64 + parameters : 1-d array, double Model parameters - resids : 1-d array, float64 + resids : 1-d array, double Residuals to use in the recursion - sigma2 : 1-d array, float64 + sigma2 : 1-d array, double Conditional variances with same shape as resids p : int Number of symmetric innovations in model @@ -359,16 +359,16 @@ def egarch_recursion(const double[::1] parameters, Number of lags of the (transformed) variance in the model nobs : int Length of resids - backcast : float64 + backcast : double Value to use when initializing the recursion var_bounds : 2-d array nobs by 2-element array of upper and lower bounds for conditional variances for each time period - lnsigma2 : 1-d array, float64 + lnsigma2 : 1-d array, double Temporary array (overwritten) with same shape as resids - std_resids : 1-d array, float64 + std_resids : 1-d array, double Temporary array (overwritten) with same shape as resids - abs_std_resids : 1-d array, float64 + abs_std_resids : 1-d array, double Temporary array (overwritten) with same shape as resids """ @@ -420,17 +420,17 @@ def midas_recursion(const double[::1] parameters, """ Parameters ---------- - parameters : 1-d array, float64 + parameters : 1-d array, double Model parameters - weights : 1-d array, float64 + weights : 1-d array, double Weights for MIDAS recursions - resids : 1-d array, float64 + resids : 1-d array, double Residuals to use in the recursion - sigma2 : 1-d array, float64 + sigma2 : 1-d array, double Conditional variances with same shape as resids nobs : int Length of resids - backcast : float64 + backcast : double Value to use when initializing the recursion var_bounds : 2-d array nobs by 2-element array of upper and lower bounds for conditional @@ -445,13 +445,13 @@ def midas_recursion(const double[::1] parameters, alpha = parameters[1] gamma = parameters[2] - aw = np.zeros(m, dtype=np.float64) - gw = np.zeros(m, dtype=np.float64) + aw = np.zeros(m, dtype=np.double) + gw = np.zeros(m, dtype=np.double) for i in range(m): aw[i] = alpha * weights[i] gw[i] = gamma * weights[i] - resids2 = np.zeros(nobs, dtype=np.float64) + resids2 = np.zeros(nobs, dtype=np.double) for t in range(nobs): resids2[t] = resids[t] * resids[t] @@ -593,6 +593,23 @@ def aparch_recursion(const double[::1] parameters, cdef class VolatilityUpdater: + """ + Base class that all volatility updaters must inherit from. + + Notes + ----- + See the implementation available for information on modifying ``__init__`` + to capture model-specific parameters and how ``initialize_update`` is + used to precompute values that change in each likelihood but not + each iteration of the recursion. 
+ + When writing a volatility updater, it is recommended to follow the + examples in recursions.pyx, which use Cython to produce a C-callable + update function that substantially improves performance. The + pure-Python subclasses of this base class in recursions_python + mirror the Cython versions but are slow for model estimation + since their loops run in Python. + """ def __init__(self): pass @@ -795,7 +812,7 @@ cdef class MIDASUpdater(VolatilityUpdater): self.gw = np.empty(m) self.weights = np.empty(m) self.resids2 = np.empty(0) - self.DOUBLE_EPS = np.finfo(np.float64).eps + self.DOUBLE_EPS = np.finfo(np.double).eps def __setstate__(self, state): cdef Py_ssize_t i diff --git a/arch/univariate/recursions_python.py b/arch/univariate/recursions_python.py index 9395ff80b8..a3e2457db7 100644 --- a/arch/univariate/recursions_python.py +++ b/arch/univariate/recursions_python.py @@ -5,12 +5,10 @@ python -m pip install . """ -from __future__ import annotations - from arch.compat.numba import jit from abc import ABCMeta, abstractmethod -from typing import cast +from typing import Optional, Union, cast import numpy as np from scipy.special import gammaln @@ -661,7 +659,7 @@ def __init__(self) -> None: @abstractmethod def initialize_update( - self, parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: """ Initialize the recursion prior to calling update @@ -755,7 +753,7 @@ def __init__(self, p: int, o: int, q: int, power: float) -> None: self.backcast = -1.0 def initialize_update( - self, parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: self.backcast = cast(float, backcast) @@ -802,7 +800,7 @@ def __init__(self, lags: Int32Array) -> None: self.backcast = -1.0 def initialize_update( - self, parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: self.backcast = cast(float, backcast) @@ -829,7 +827,7 @@ def update( class EWMAUpdater(VolatilityUpdater, metaclass=AbstractDocStringInheritor): - def __init__(self, lam: float | None) -> None: + def __init__(self, lam: Optional[float]) -> None: super().__init__() self.estimate_lam = lam is None self.params = np.zeros(3) @@ -838,7 +836,7 @@ def __init__(self, lam: float | None) -> None: self.params[2] = lam def initialize_update( - self, parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: if self.estimate_lam: self.params[1] = 1.0 - parameters[0] @@ -873,7 +871,7 @@ def __init__(self, m: int, asym: bool) -> None: self.gw = np.empty(m) self.weights = np.empty(m) self.resids2 = np.empty(0) - self.DOUBLE_EPS = float(np.finfo(np.float64).eps) + self.DOUBLE_EPS = float(np.finfo(np.double).eps) def update_weights(self, theta: float) -> None: sum_w = 0.0 @@ -892,7 +890,7 @@ def update_weights(self, theta: float) -> None: self.weights[i] /= sum_w def initialize_update( - self, parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: self.update_weights(parameters[2 + self.asym]) alpha = parameters[1] @@ -943,7 +941,7 @@ def __init__(self, p: int, q: int, power: float, truncation: int) -> None: self.fresids = np.empty(0) def initialize_update( - self,
parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: self.lam = figarch_weights(parameters[1:], self.p, self.q, self.truncation) self.backcast = backcast @@ -993,7 +991,7 @@ def __init__( self.last_sigma2s = np.empty((1, kmax)) def initialize_update( - self, parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: self.backcast = cast(Float64Array, backcast) @@ -1033,7 +1031,7 @@ def _resize(self, nobs: int) -> None: self.std_resids = np.empty(nobs) def initialize_update( - self, parameters: Float64Array, backcast: float | Float64Array, nobs: int + self, parameters: Float64Array, backcast: Union[float, Float64Array], nobs: int ) -> None: self.backcast = cast(float, backcast) self._resize(nobs) diff --git a/arch/univariate/volatility.py b/arch/univariate/volatility.py index 2738301bb5..7a9d4d5c37 100644 --- a/arch/univariate/volatility.py +++ b/arch/univariate/volatility.py @@ -4,12 +4,10 @@ same inputs. """ -from __future__ import annotations - from abc import ABCMeta, abstractmethod from collections.abc import Sequence import itertools -from typing import TYPE_CHECKING, cast +from typing import TYPE_CHECKING, Optional, Union, cast from warnings import warn import numpy as np @@ -82,7 +80,7 @@ def __init__( self, std_resid: Float64Array, start: int, - random_state: RandomState | None = None, + random_state: Optional[RandomState] = None, ) -> None: if start <= 0 or start > std_resid.shape[0]: raise ValueError("start must be > 0 and <= len(std_resid).") @@ -102,7 +100,7 @@ def random_state(self) -> RandomState: return self._random_state def rng(self) -> RNGType: - def _rng(size: int | tuple[int, ...]) -> Float64Array: + def _rng(size: Union[int, tuple[int, ...]]) -> Float64Array: if self._index >= self.std_resid.shape[0]: raise IndexError("not enough data points.") index = self._random_state.random_sample(size) @@ -152,29 +150,46 @@ def ewma_recursion( class VarianceForecast: + """ + Container for variance forecasts + + Parameters + ---------- + forecasts : ndarray + Array containing the forecasts + forecast_paths : ndarray, optional + Array containing the forecast paths if using simulation or bootstrap + shocks : ndarray, optional + Array containing the shocks used to generate the forecast paths if + using simulation or bootstrap + """ + _forecasts = None _forecast_paths = None def __init__( self, forecasts: Float64Array, - forecast_paths: Float64Array | None = None, - shocks: Float64Array | None = None, + forecast_paths: Optional[Float64Array] = None, + shocks: Optional[Float64Array] = None, ) -> None: self._forecasts = forecasts self._forecast_paths = forecast_paths self._shocks = shocks @property - def forecasts(self) -> Float64Array | None: + def forecasts(self) -> Optional[Float64Array]: + """The variance forecasts""" return self._forecasts @property - def forecast_paths(self) -> Float64Array | None: + def forecast_paths(self) -> Optional[Float64Array]: + """The variance forecast paths""" return self._forecast_paths @property - def shocks(self) -> Float64Array | None: + def shocks(self) -> Optional[Float64Array]: + """The shocks used to construct the variance forecast paths""" return self._shocks @@ -195,7 +210,7 @@ def __init__(self) -> None: self._min_bootstrap_obs = 100 self._start = 0 self._stop = -1 - self._volatility_updater: rec.VolatilityUpdater | None = None + 
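# Illustrative sketch of the VarianceForecast container documented above;
# forecast_paths and shocks remain None for analytic forecasts. The values are
# made up, since these objects are normally built by VolatilityProcess.forecast.
import numpy as np

from arch.univariate.volatility import VarianceForecast

vf = VarianceForecast(
    forecasts=np.full((1, 5), 0.04),  # one forecast origin, horizon of 5
    forecast_paths=None,
    shocks=None,
)
print(vf.forecasts, vf.forecast_paths, vf.shocks)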
self._volatility_updater: Optional[rec.VolatilityUpdater] = None def __str__(self) -> str: return self.name @@ -262,7 +277,7 @@ def update( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> float: """ @@ -317,7 +332,7 @@ def _one_step_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, horizon: int, start_index: int, @@ -365,7 +380,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -402,7 +417,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -450,12 +465,12 @@ def _bootstrap_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, simulations: int, - random_state: RandomState | None, + random_state: Optional[RandomState], ) -> VarianceForecast: """ Simulation-based volatility forecasts using model residuals @@ -562,7 +577,7 @@ def starting_values(self, resids: Float64Array) -> Float64Array: Array of starting values """ - def backcast(self, resids: Float64Array) -> float | Float64Array: + def backcast(self, resids: Float64Array) -> Union[float, Float64Array]: """ Construct values for backcasting to start the recursion @@ -583,8 +598,8 @@ def backcast(self, resids: Float64Array) -> float | Float64Array: return float(np.sum((resids[:tau] ** 2.0) * w)) def backcast_transform( - self, backcast: float | Float64Array - ) -> float | Float64Array: + self, backcast: Union[float, Float64Array] + ) -> Union[float, Float64Array]: """ Transformation to apply to user-provided backcast values @@ -624,7 +639,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: """ @@ -668,14 +683,14 @@ def forecast( self, parameters: ArrayLike1D, resids: Float64Array, - backcast: Float64Array | float, + backcast: Union[float, Float64Array], var_bounds: Float64Array, - start: int | None = None, + start: Optional[int] = None, horizon: int = 1, method: ForecastingMethod = "analytic", simulations: int = 1000, - rng: RNGType | None = None, - random_state: RandomState | None = None, + rng: Optional[RNGType] = None, + random_state: Optional[RandomState] = None, ) -> VarianceForecast: """ Forecast volatility from the model @@ -774,11 +789,11 @@ def forecast( @abstractmethod def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: """ Simulate data from the model @@ -811,7 +826,7 @@ def _gaussian_loglikelihood( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> float: """ @@ -855,7 +870,7 @@ def compute_variance( 
parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: sigma2[:] = parameters[0] @@ -866,11 +881,11 @@ def starting_values(self, resids: Float64Array) -> Float64Array: def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: parameters = ensure1d(parameters, "parameters", False) errors = rng(nobs + burn) @@ -882,12 +897,12 @@ def constraints(self) -> tuple[Float64Array, Float64Array]: return np.ones((1, 1)), np.zeros(1) def backcast_transform( - self, backcast: float | Float64Array - ) -> float | Float64Array: + self, backcast: Union[float, Float64Array] + ) -> Union[float, Float64Array]: backcast = super().backcast_transform(backcast) return backcast - def backcast(self, resids: Float64Array) -> float | Float64Array: + def backcast(self, resids: Float64Array) -> Union[float, Float64Array]: return float(resids.var()) def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: @@ -906,7 +921,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -922,7 +937,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -1100,7 +1115,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: # fresids is abs(resids) ** power @@ -1121,12 +1136,12 @@ def compute_variance( return sigma2 def backcast_transform( - self, backcast: float | Float64Array - ) -> float | Float64Array: + self, backcast: Union[float, Float64Array] + ) -> Union[float, Float64Array]: backcast = super().backcast_transform(backcast) return np.sqrt(backcast) ** self.power - def backcast(self, resids: Float64Array) -> float | Float64Array: + def backcast(self, resids: Float64Array) -> Union[float, Float64Array]: power = self.power tau = min(75, resids.shape[0]) w = 0.94 ** np.arange(tau) @@ -1137,11 +1152,11 @@ def backcast(self, resids: Float64Array) -> float | Float64Array: def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: parameters = ensure1d(parameters, "parameters", False) p, o, q, power = self.p, self.o, self.q, self.power @@ -1241,7 +1256,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -1349,7 +1364,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -1450,7 +1465,7 
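# Illustrative sketch of the simulate() signature above applied to a GARCH(1,1)
# volatility process; rng is any callable mapping a size to draws, matching the
# RNGType callables used throughout. Parameter values are made up.
import numpy as np

from arch.univariate import GARCH

gen = np.random.default_rng(0)
vol = GARCH(p=1, o=0, q=1)
data, sigma2 = vol.simulate(
    [0.02, 0.05, 0.90],  # omega, alpha, beta, with alpha + beta < 1
    nobs=500,
    rng=lambda size: gen.standard_normal(size),
    burn=500,
)
print(data.shape, sigma2.shape)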
@@ class HARCH(VolatilityProcess, metaclass=AbstractDocStringInheritor): more general ARCH process have been restricted. """ - def __init__(self, lags: int | Sequence[int] = 1) -> None: + def __init__(self, lags: Union[int, Sequence[int]] = 1) -> None: super().__init__() if not isinstance(lags, Sequence): lag_val = int(lags) @@ -1493,7 +1508,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: lags = self.lags @@ -1506,11 +1521,11 @@ def compute_variance( def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: parameters = ensure1d(parameters, "parameters", False) lags = self.lags @@ -1565,7 +1580,7 @@ def _common_forecast_components( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], horizon: int, ) -> tuple[float, Float64Array, Float64Array]: arch_params = self._harch_to_arch(parameters) @@ -1590,7 +1605,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -1610,7 +1625,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -1687,13 +1702,14 @@ class MIDASHyperbolic(VolatilityProcess, metaclass=AbstractDocStringInheritor): \phi_{i}(\theta) \propto \Gamma(i+\theta)/(\Gamma(i+1)\Gamma(\theta)) where :math:`\Gamma` is the gamma function. :math:`\{\phi_i(\theta)\}` is - normalized so that :math:`\sum \phi_i(\theta)=1` + normalized so that :math:`\sum \phi_i(\theta)=1`. See [1]_ and [2]_ for + further details. References ---------- - .. [*] Foroni, Claudia, and Massimiliano Marcellino. "A survey of + .. [1] Foroni, Claudia, and Massimiliano Marcellino. "A survey of Econometric Methods for Mixed-Frequency Data". Norges Bank. (2013). - .. [*] Sheppard, Kevin. "Direct volatility modeling". Manuscript. (2018). + .. [2] Sheppard, Kevin. "Direct volatility modeling". Manuscript. (2018). 
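# Minimal sketch of the hyperbolic MIDAS weights defined above,
# phi_i(theta) proportional to Gamma(i + theta) / (Gamma(i + 1) * Gamma(theta)),
# normalized to sum to one and computed in logs for numerical stability.
import numpy as np
from scipy.special import gammaln


def hyperbolic_weights(theta: float, m: int = 22) -> np.ndarray:
    j = np.arange(1.0, m + 1)
    w = np.exp(gammaln(theta + j) - gammaln(j + 1) - gammaln(theta))
    return w / w.sum()


print(hyperbolic_weights(0.6)[:5])  # leading weights for an illustrative theta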
""" def __init__(self, m: int = 22, asym: bool = False) -> None: @@ -1762,7 +1778,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: nobs = resids.shape[0] @@ -1778,11 +1794,11 @@ def compute_variance( def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: parameters = np.asarray(ensure1d(parameters, "parameters", False), dtype=float) if self._asym: @@ -1860,7 +1876,7 @@ def parameter_names(self) -> list[str]: def _weights(self, params: Float64Array) -> Float64Array: m = self.m # Prevent 0 - theta = max(params[-1], np.finfo(np.float64).eps) + theta = max(params[-1], np.finfo(np.double).eps) j = np.arange(1.0, m + 1) w = gammaln(theta + j) - gammaln(j + 1) - gammaln(theta) w = np.exp(w) @@ -1870,7 +1886,7 @@ def _common_forecast_components( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], horizon: int, ) -> tuple[int, Float64Array, Float64Array, Float64Array, Float64Array]: if self._asym: @@ -1904,7 +1920,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -1930,7 +1946,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -2054,9 +2070,9 @@ class EWMAVariance(VolatilityProcess, metaclass=AbstractDocStringInheritor): parameter when fitting the model. 
""" - def __init__(self, lam: float | None = 0.94) -> None: + def __init__(self, lam: Optional[float] = 0.94) -> None: super().__init__() - self.lam: float | None = lam + self.lam: Optional[float] = lam self._estimate_lam = lam is None self._num_params = 1 if self._estimate_lam else 0 if lam is not None and not 0.0 < lam < 1.0: @@ -2092,7 +2108,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: lam = parameters[0] if self._estimate_lam else self.lam @@ -2107,11 +2123,11 @@ def constraints(self) -> tuple[Float64Array, Float64Array]: def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: parameters = ensure1d(parameters, "parameters", False) errors = rng(nobs + burn) @@ -2144,7 +2160,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -2160,7 +2176,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -2285,7 +2301,7 @@ def _ewma_smoothing_parameters(self) -> Float64Array: mus = cast(Float64Array, np.exp(-1.0 / taus)) return mus - def backcast(self, resids: Float64Array) -> float | Float64Array: + def backcast(self, resids: Float64Array) -> Union[float, Float64Array]: """ Construct values for backcasting to start the recursion @@ -2315,8 +2331,8 @@ def backcast(self, resids: Float64Array) -> float | Float64Array: return backcast def backcast_transform( - self, backcast: float | Float64Array - ) -> float | Float64Array: + self, backcast: Union[float, Float64Array] + ) -> Union[float, Float64Array]: backcast = super().backcast_transform(backcast) mus = self._ewma_smoothing_parameters() backcast_arr = np.asarray(backcast) @@ -2337,9 +2353,7 @@ def parameter_names(self) -> list[str]: return [] def variance_bounds(self, resids: Float64Array, power: float = 2.0) -> Float64Array: - return np.ones((resids.shape[0], 1)) * np.array( - [-1.0, np.finfo(np.float64).max] - ) + return np.ones((resids.shape[0], 1)) * np.array([-1.0, np.finfo(np.double).max]) def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: return [] @@ -2352,7 +2366,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: nobs = resids.shape[0] @@ -2374,11 +2388,11 @@ def compute_variance( def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: errors = rng(nobs + burn) @@ -2410,7 +2424,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: 
Float64Array, start: int, horizon: int, @@ -2426,7 +2440,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -2530,7 +2544,7 @@ def __init__(self, p: int = 1, o: int = 0, q: int = 1) -> None: raise ValueError("One of p or o must be strictly positive") self._name = "EGARCH" if q > 0 else "EARCH" # Helpers for fitting variance - self._arrays: tuple[Float64Array, Float64Array, Float64Array] | None = None + self._arrays: Optional[tuple[Float64Array, Float64Array, Float64Array]] = None self._volatility_updater = rec.EGARCHUpdater(self.p, self.o, self.q) def __str__(self) -> str: @@ -2568,7 +2582,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: p, o, q = self.p, self.o, self.q @@ -2599,21 +2613,21 @@ def compute_variance( return sigma2 def backcast_transform( - self, backcast: float | Float64Array - ) -> float | Float64Array: + self, backcast: Union[float, Float64Array] + ) -> Union[float, Float64Array]: backcast = super().backcast_transform(backcast) return float(np.log(backcast)) - def backcast(self, resids: Float64Array) -> float | Float64Array: + def backcast(self, resids: Float64Array) -> Union[float, Float64Array]: return float(np.log(super().backcast(resids))) def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: parameters = ensure1d(parameters, "parameters", False) p, o, q = self.p, self.o, self.q @@ -2703,7 +2717,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -2718,7 +2732,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -2816,7 +2830,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: if self._stop - self._start != sigma2.shape[0]: @@ -2834,11 +2848,11 @@ def starting_values(self, resids: Float64Array) -> Float64Array: def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: raise NotImplementedError("Fixed Variance processes do not support simulation") @@ -2848,7 +2862,7 @@ def constraints(self) -> tuple[Float64Array, Float64Array]: else: return np.ones((0, 0)), np.zeros(0) - def backcast(self, resids: Float64Array) -> float | Float64Array: + def backcast(self, resids: Float64Array) -> Union[float, Float64Array]: return 1.0 def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: @@ -2873,7 +2887,7 @@ def _analytic_forecast( self, 
parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -2887,7 +2901,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -3033,7 +3047,7 @@ def _generate_name(self) -> str: return f"Power FIGARCH (power: {self.power:0.1f})" def bounds(self, resids: Float64Array) -> list[tuple[float, float]]: - eps_half = np.sqrt(np.finfo(np.float64).eps) + eps_half = np.sqrt(np.finfo(np.double).eps) v = np.mean(abs(resids) ** self.power) bounds = [(0.0, 10.0 * float(v))] @@ -3076,7 +3090,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: # fresids is abs(resids) ** power @@ -3095,12 +3109,12 @@ def compute_variance( return sigma2 def backcast_transform( - self, backcast: float | Float64Array - ) -> float | Float64Array: + self, backcast: Union[float, Float64Array] + ) -> Union[float, Float64Array]: backcast = super().backcast_transform(backcast) return np.sqrt(backcast) ** self.power - def backcast(self, resids: Float64Array) -> float | Float64Array: + def backcast(self, resids: Float64Array) -> Union[float, Float64Array]: power = self.power tau = min(75, resids.shape[0]) w = 0.94 ** np.arange(tau) @@ -3111,11 +3125,11 @@ def backcast(self, resids: Float64Array) -> float | Float64Array: def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: parameters = ensure1d(parameters, "parameters", False) truncation = self.truncation @@ -3226,7 +3240,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -3268,7 +3282,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -3379,7 +3393,7 @@ def __init__( p: int = 1, o: int = 1, q: int = 1, - delta: float | None = None, + delta: Optional[float] = None, common_asym: bool = False, ) -> None: super().__init__() @@ -3439,7 +3453,7 @@ def compute_variance( parameters: Float64Array, resids: Float64Array, sigma2: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, ) -> Float64Array: abs_resids = np.abs(resids) @@ -3558,11 +3572,11 @@ def parameter_names(self) -> list[str]: def simulate( self, - parameters: Sequence[int | float] | ArrayLike1D, + parameters: Union[Sequence[Union[int, float]], ArrayLike1D], nobs: int, rng: RNGType, burn: int = 500, - initial_value: None | float | Float64Array = None, + initial_value: Union[float, Float64Array, None] = None, ) -> tuple[Float64Array, Float64Array]: params = np.asarray(ensure1d(parameters, "parameters", False), dtype=float) params = self._repack_parameters(params) @@ -3650,7 +3664,7 @@ def _simulation_forecast( self, parameters: Float64Array, resids: 
Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, @@ -3705,7 +3719,7 @@ def _analytic_forecast( self, parameters: Float64Array, resids: Float64Array, - backcast: float | Float64Array, + backcast: Union[float, Float64Array], var_bounds: Float64Array, start: int, horizon: int, diff --git a/arch/utility/__init__.py b/arch/utility/__init__.py index 97e65163d5..847cc8aa5d 100644 --- a/arch/utility/__init__.py +++ b/arch/utility/__init__.py @@ -1,6 +1,5 @@ -from __future__ import annotations - import os +from typing import Union from arch.utility.cov import cov_nw @@ -8,7 +7,7 @@ def test( - extra_args: str | list[str] | None = None, + extra_args: Union[str, list[str], None] = None, exit: bool = True, append: bool = True, location: str = "", @@ -49,7 +48,8 @@ def test( if not os.path.exists(pkg_loc): raise RuntimeError(f"{pkg_loc} was not found. Unable to run tests") cmd = [pkg_loc] + cmd - print("running: pytest {}".format(" ".join(cmd))) + cmd_str = " ".join(cmd) + print(f"running: pytest {cmd_str}") status = pytest.main(cmd) if exit: # pragma: no cover sys.exit(status) diff --git a/arch/utility/array.py b/arch/utility/array.py index 23d9e6e108..3b53e08edb 100644 --- a/arch/utility/array.py +++ b/arch/utility/array.py @@ -2,15 +2,13 @@ Utility functions that do not explicitly relate to Volatility modeling """ -from __future__ import annotations - from arch.compat.pandas import is_datetime64_any_dtype from abc import ABCMeta from collections.abc import Hashable, Sequence import datetime as dt from functools import cached_property -from typing import Any, Literal, overload +from typing import Any, Literal, Optional, Union, overload import numpy as np from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, Timestamp, to_datetime @@ -35,8 +33,8 @@ @overload def ensure1d( - x: int | float | Sequence[int | float] | ArrayLike, - name: Hashable | None, + x: Union[int, float, Sequence[Union[int, float]], ArrayLike], + name: Optional[Hashable], series: Literal[True] = ..., ) -> Series: # pragma: no cover ... # pragma: no cover @@ -44,18 +42,18 @@ def ensure1d( @overload def ensure1d( - x: int | float | Sequence[int | float] | ArrayLike, - name: Hashable | None, + x: Union[int, float, Sequence[Union[int, float]], ArrayLike], + name: Optional[Hashable], series: Literal[False], ) -> np.ndarray: # pragma: no cover ... 
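# Illustrative sketch of the ensure1d overloads above: series=True returns a
# pandas Series while series=False returns a 1-d ndarray. Inputs are made up.
import numpy as np

from arch.utility.array import ensure1d

as_series = ensure1d([1.0, 2.0, 3.0], "y", series=True)  # pandas Series named "y"
as_array = ensure1d(np.array([[1.0], [2.0]]), "y", False)  # flattened to shape (2,)
print(type(as_series).__name__, as_array.shape)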
# pragma: no cover def ensure1d( - x: int | float | Sequence[int | float] | ArrayLike, # noqa: E231 - name: Hashable | None, + x: Union[int, float, Sequence[Union[int, float]], ArrayLike], # noqa: E231 + name: Optional[Hashable], series: bool = False, -) -> NDArray | Series: +) -> Union[NDArray, Series]: if isinstance(x, Series): if not isinstance(x.name, str): x.name = str(x.name) @@ -87,9 +85,11 @@ def ensure1d( def ensure2d( - x: Sequence[float | int] | Sequence[Sequence[float | int]] | ArrayLike, + x: Union[ + Sequence[Union[float, int]], Sequence[Sequence[Union[float, int]]], ArrayLike + ], name: str, -) -> DataFrame | NDArray: +) -> Union[DataFrame, NDArray]: if isinstance(x, Series): return DataFrame(x) elif isinstance(x, DataFrame): @@ -107,13 +107,11 @@ def ensure2d( raise TypeError("Variable " + name + "must be a Series, DataFrame or ndarray.") -def parse_dataframe( - x: ArrayLike | None, name: str | list[str] -) -> ( - tuple[Index, Index] - | tuple[list[Hashable | None], Index] - | tuple[list[str], NDArray] -): +def parse_dataframe(x: Optional[ArrayLike], name: Union[str, list[str]]) -> Union[ + tuple[Index, Index], + tuple[list[Optional[Hashable]], Index], + tuple[list[str], NDArray], +]: if x is None: assert isinstance(name, str) return [name], np.empty(0) @@ -185,8 +183,8 @@ class AbstractDocStringInheritor(ConcreteClassMeta, DocStringInheritor): def date_to_index( - date: str | dt.date | dt.datetime | np.datetime64 | Timestamp, - date_index: DatetimeIndex | NDArray | Series[Timestamp], + date: Union[str, dt.date, dt.datetime, np.datetime64, Timestamp], + date_index: Union[DatetimeIndex, NDArray, "Series[Timestamp]"], ) -> int: """ Looks up a date in an array of dates @@ -255,7 +253,9 @@ def date_to_index( return int(loc) -def cutoff_to_index(cutoff: None | int | DateLike, index: Index, default: int) -> int: +def cutoff_to_index( + cutoff: Union[None, int, DateLike], index: Index, default: int +) -> int: """ Converts a cutoff to a numerical index @@ -284,7 +284,7 @@ def cutoff_to_index(cutoff: None | int | DateLike, index: Index, default: int) - return int_index -def find_index(s: AnyPandas, index: int | DateLike) -> int: +def find_index(s: AnyPandas, index: Union[int, DateLike]) -> int: """ Returns the numeric index for a string or datetime diff --git a/arch/utility/cov.py b/arch/utility/cov.py index 36edb77598..f39385413a 100644 --- a/arch/utility/cov.py +++ b/arch/utility/cov.py @@ -1,4 +1,4 @@ -from __future__ import annotations +from typing import Union from numpy import asarray, squeeze @@ -7,7 +7,7 @@ def cov_nw( y: Float64Array, lags: int = 0, demean: bool = True, axis: int = 0, ddof: int = 0 -) -> Float64Array | float: +) -> Union[Float64Array, float]: """ Computes Newey-West covariance for 1-d and 2-d arrays diff --git a/arch/utility/io.py b/arch/utility/io.py index a96547a500..32af89fcab 100644 --- a/arch/utility/io.py +++ b/arch/utility/io.py @@ -1,5 +1,3 @@ -from __future__ import annotations - import numpy as np __all__ = ["str_format", "pval_format"] diff --git a/arch/utility/testing.py b/arch/utility/testing.py index ded775bdf4..1954626576 100644 --- a/arch/utility/testing.py +++ b/arch/utility/testing.py @@ -1,5 +1,3 @@ -from __future__ import annotations - from functools import cached_property from scipy.stats import chi2 @@ -67,17 +65,9 @@ def alternative(self) -> str: def __str__(self) -> str: name = "" if not self._name else self._name + "\n" - msg = ( - "{name}H0: {null}\n{name}H1: {alternative}\nStatistic: {stat:0.4f}\n" - "P-value: 
{pval:0.4f}\nDistributed: {dist}" ) return msg.format( name=name, null=self.null, alternative=self.alternative, stat=self.stat, pval=self.pval, dist=self.dist_name, + return ( + f"{name}H0: {self.null}\n{name}H1: {self.alternative}\nStatistic: {self.stat:0.4f}\n" + f"P-value: {self.pval:0.4f}\nDistributed: {self.dist_name}" ) def __repr__(self) -> str: diff --git a/arch/utility/timeseries.py b/arch/utility/timeseries.py index 23589170d0..564760127e 100644 --- a/arch/utility/timeseries.py +++ b/arch/utility/timeseries.py @@ -1,6 +1,4 @@ -from __future__ import annotations - -from typing import overload +from typing import Optional, Union, overload import numpy as np import pandas as pd @@ -75,12 +73,12 @@ def add_trend( def add_trend( - x: NDArrayOrFrame | None = None, + x: Optional[NDArrayOrFrame] = None, trend: Literal["n", "c", "t", "ct", "ctt"] = "c", prepend: bool = False, - nobs: int | None = None, + nobs: Optional[int] = None, has_constant: Literal["raise", "add", "skip"] = "skip", -) -> Float64Array | pd.DataFrame: +) -> Union[Float64Array, pd.DataFrame]: """ Adds a trend and/or constant to an array. @@ -133,7 +131,7 @@ def add_trend( nobs = len(np.asanyarray(x)) elif nobs is None or nobs <= 0: raise ValueError("nobs must be a positive integer if x is None") - trend_array = np.vander(np.arange(1, nobs + 1, dtype=np.float64), trend_order + 1) + trend_array = np.vander(np.arange(1, nobs + 1, dtype=np.double), trend_order + 1) # put in order ctt trend_array = np.fliplr(trend_array) if trend_name == "t": diff --git a/doc/source/bootstrap/iid-bootstraps.rst b/doc/source/bootstrap/iid-bootstraps.rst index 6159d0ef62..0f45c462c4 100644 --- a/doc/source/bootstrap/iid-bootstraps.rst +++ b/doc/source/bootstrap/iid-bootstraps.rst @@ -21,4 +21,4 @@ type of data arises naturally in experimental settings, e.g., website A/B testin .. autosummary:: :toctree: generated/ - ~arch.bootstrap.IndependentSamplesBootstrap + IndependentSamplesBootstrap diff --git a/doc/source/changes.rst b/doc/source/changes.rst index 8bbb2038d9..1a47086c0c 100644 --- a/doc/source/changes.rst +++ b/doc/source/changes.rst @@ -1,7 +1,7 @@ Change Logs ----------- -.. include:: changes/6.0.rst +.. include:: changes/7.0.rst ============= Past Releases ============= .. toctree:: :maxdepth: 1 - changes/7.0 changes/6.0 changes/5.0 changes/4.0 diff --git a/doc/source/changes/4.0.rst b/doc/source/changes/4.0.rst index 5f87c7c3b4..a2eda07eea 100644 --- a/doc/source/changes/4.0.rst +++ b/doc/source/changes/4.0.rst @@ -39,7 +39,7 @@ Release 4.16 Release 4.15 ============ - This is a minor release with doc fixes and other small updates. The only notable - feature is :meth:`~arch.unitroot.PhillipsPerron.regression` which returns + feature is :meth:`~arch.unitroot.PhillipsPerron.regression` which returns regression results from the model estimated as part of the test (:issue:`395`). Release 4.14 ============ @@ -106,7 +106,7 @@ Release 4.9 - Added :func:`~arch.unitroot.auto_bandwidth` to compute optimized bandwidth for a number of common kernel covariance estimators (:issue:`303`). This code was written by Michael Rabba. -- Added a parameter `rescale` to :func:`~arch.univariate.mean.arch_model` +- Added a parameter `rescale` to :func:`~arch.univariate.arch_model` that allows the estimator to rescale data if it may help parameter estimation.
If `rescale=True`, then the data will be rescaled by a power of 10 (e.g., 10, 100, or 1000) to produce a series with a residual diff --git a/doc/source/changes/5.0.rst b/doc/source/changes/5.0.rst index eaab9d754f..dab58fe602 100644 --- a/doc/source/changes/5.0.rst +++ b/doc/source/changes/5.0.rst @@ -75,12 +75,12 @@ Volatility Modeling instances and integers. If an integer is passed, the random number generator is constructed by calling :func:`numpy.random.default_rng`. The ``seed`` keyword argument replaces the ``random_state`` keyword argument. -- The :meth:`~arch.univariate.distribution.Normal.random_state` property has also been deprecated in favor - of :meth:`~arch.univariate.distribution.Normal.generator`. +- The ``Normal.random_state`` property has also been deprecated in favor + of :meth:`~arch.univariate.Normal.generator`. - Added :class:`~arch.univariate.ARCHInMean` mean process supporting (G)ARCH-in-mean models. - Extended :class:`~arch.univariate.volatility.VolatilityProcess` with :func:`~arch.univariate.volatility.VolatilityProcess.volatility_updater` that contains a - :class:`~arch.univariate.recursions_python.VolatilityUpdater` to allow + :class:`~arch.univariate.recursions.VolatilityUpdater` to allow :class:`~arch.univariate.ARCHInMean` to be created from different volatility processes. Setup diff --git a/doc/source/changes/7.0.rst b/doc/source/changes/7.0.rst index e20fc83bc6..dea5c6b34e 100644 --- a/doc/source/changes/7.0.rst +++ b/doc/source/changes/7.0.rst @@ -2,6 +2,11 @@ Version 7 ========= +Release 7.1 +=========== +- Improve documentation +- Improve typing of a small number of functions + Release 7.0 =========== - Full compatibility with NumPy 2 diff --git a/doc/source/conf.py b/doc/source/conf.py index 5361fd49a7..42a8f70f22 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -53,7 +53,9 @@ # extensions coming with Sphinx (named "sphinx.ext.*") or your custom # ones. extensions = [ + # One of the next two only "sphinx.ext.napoleon", + # "numpydoc", "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.extlinks", @@ -90,7 +92,7 @@ # You can specify multiple suffix as a list of string: # # source_suffix = [".rst", ".md"] -source_suffix = ".rst" +source_suffix = {".rst": "restructuredtext"} # The master toctree document.
master_doc = "index" @@ -280,10 +282,12 @@ extlinks = {"issue": ("https://github.com/bashtage/arch/issues/%s", "GH%s")} + +napoleon_google_docstring = False napoleon_use_admonition_for_examples = False napoleon_use_admonition_for_notes = False napoleon_use_admonition_for_references = False - +napoleon_attr_annotations = True napoleon_preprocess_types = True napoleon_use_param = True napoleon_type_aliases = { @@ -294,32 +298,51 @@ "AxesSubplot": "matplotlib.axes.Axes", "DataFrame": "pandas.DataFrame", "Series": "pandas.Series", - "csc_matrix": "scipy.sparse.csc_matrix", - "DataArray": "xarray.DataArray", "ndarray": "numpy.ndarray", "np.ndarray": "numpy.array", "pd.Series": "pandas.Series", "RandomState": "numpy.random.RandomState", "Generator": "numpy.random.Generator", + "float64": "numpy.double", + "numpy.float64": "numpy.double", + "OptimizeResult": "scipy.optimize.OptimizeResult", "VarianceForecast": "arch.univariate.volatility.VarianceForecast", + "CovarianceEstimator": "arch.covariance.kernel.CovarianceEstimator", + "CovarianceEstimate": "arch.covariance.kernel.CovarianceEstimate", + "VolatilityUpdater": "arch.univariate.recursions.VolatilityUpdater", + "ARCHModel": "arch.univariate.base.ARCHModel", + "ARCHModelResult": "arch.univariate.base.ARCHModelResult", + "ARCHModelFixedResult": "arch.univariate.base.ARCHModelFixedResult", } numpydoc_use_autodoc_signature = True numpydoc_xref_param_type = True numpydoc_class_members_toctree = False numpydoc_xref_aliases = { + "array-like": ":term:`array-like `", + "array_like": ":term:`array_like`", "Figure": "matplotlib.figure.Figure", "Axes": "matplotlib.axes.Axes", "AxesSubplot": "matplotlib.axes.Axes", "DataFrame": "pandas.DataFrame", "Series": "pandas.Series", "ndarray": "numpy.ndarray", - "RandomState": "numpy.random.RandomState", - "VarianceForecast": "arch.univariate.volatility.VarianceForecast", - "Generator": "numpy.random.Generator", "np.ndarray": "numpy.array", "pd.Series": "pandas.Series", + "RandomState": "numpy.random.RandomState", + "Generator": "numpy.random.Generator", + "float64": "numpy.double", + "numpy.float64": "numpy.double", + "OptimizeResult": "scipy.optimize.OptimizeResult", + "VarianceForecast": "arch.univariate.volatility.VarianceForecast", + "CovarianceEstimator": "arch.covariance.kernel.CovarianceEstimator", + "CovarianceEstimate": "arch.covariance.kernel.CovarianceEstimate", + "VolatilityUpdater": "arch.univariate.recursions.VolatilityUpdater", + "ARCHModel": "arch.univariate.base.ARCHModel", + "ARCHModelResult": "arch.univariate.base.ARCHModelResult", + "ARCHModelFixedResult": "arch.univariate.base.ARCHModelFixedResult", } +numpydoc_xref_ignore = {"type", "optional", "default"} autosummary_generate = True autoclass_content = "class" diff --git a/doc/source/covariance/covariance.rst b/doc/source/covariance/covariance.rst index 95c393a316..fe131a91cd 100644 --- a/doc/source/covariance/covariance.rst +++ b/doc/source/covariance/covariance.rst @@ -33,3 +33,13 @@ Results :toctree: generated/ CovarianceEstimate + + +Base Class +---------- +All long-run covariance estimators inherit from :class:`~arch.covariance.kernel.CovarianceEstimator`. + +.. autosummary:: + :toctree: generated/ + + CovarianceEstimator diff --git a/doc/source/index.rst b/doc/source/index.rst index 88627d7d1c..9735d074c1 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -36,13 +36,15 @@ routines relevant for the analysis of financial data. 
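# Illustrative sketch: long-run covariance estimators share the
# CovarianceEstimator interface cross-referenced above; Bartlett is one
# concrete kernel. The data are simulated and purely illustrative.
import numpy as np

from arch.covariance.kernel import Bartlett

x = np.random.default_rng(0).standard_normal((500, 2))
est = Bartlett(x)  # bandwidth is selected automatically when not provided
print(est.cov.long_run)  # CovarianceEstimate exposing the long-run covariance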
Unit Root Tests and Cointegration Analysis Long-run Covariance Estimation API Reference + Common Type Definitions Change Log Citation ======== -This package should be cited using Zenodo. For example, for the 4.13 release, +This package should be cited using Zenodo. For example, for the 4.18 release, please +cite [arch]_ as: -.. [*] Kevin Sheppard (2021, March 3). bashtage/arch: Release 4.18 (Version v4.18). +.. [arch] Kevin Sheppard (2021, March 3). bashtage/arch: Release 4.18 (Version v4.18). Zenodo. https://doi.org/10.5281/zenodo.593254 .. image:: https://zenodo.org/badge/doi/10.5281/zenodo.593254.svg diff --git a/doc/source/multiple-comparison/multiple-comparison-reference.rst b/doc/source/multiple-comparison/multiple-comparison-reference.rst index ae613c1a53..7fce19e97e 100644 --- a/doc/source/multiple-comparison/multiple-comparison-reference.rst +++ b/doc/source/multiple-comparison/multiple-comparison-reference.rst @@ -34,7 +34,7 @@ there are potentially alternative models being considered. .. autosummary:: :toctree: generated/ - ~arch.bootstrap.SPA + SPA Stepwise Multiple Testing (StepM) @@ -50,7 +50,7 @@ to allow multiple calls. .. autosummary:: :toctree: generated/ - ~arch.bootstrap.StepM + StepM Model Confidence Set (MCS) ========================== @@ -65,4 +65,4 @@ Familywise Error Rate rather than the usual test size. .. autosummary:: :toctree: generated/ - ~arch.bootstrap.MCS + MCS diff --git a/doc/source/types.rst b/doc/source/types.rst new file mode 100644 index 0000000000..45a8783643 --- /dev/null +++ b/doc/source/types.rst @@ -0,0 +1,33 @@ +Common Type Definitions +======================== + +.. py:currentmodule:: arch.typing + +Array Types +----------- + +.. autoclass:: ArrayLike1D +.. autoclass:: ArrayLike2D +.. autoclass:: ArrayLike +.. autoclass:: NDArray +.. autoclass:: Float64Array +.. autoclass:: Int64Array +.. autoclass:: Int32Array +.. autoclass:: IntArray +.. autoclass:: BoolArray +.. autoclass:: AnyArray +.. autoclass:: Uint32Array +.. autoclass:: FloatOrArray +.. autoclass:: NDArrayOrFrame +.. autoclass:: AnyPandas + +Other Types +----------- + +.. autoclass:: RandomStateState +.. autoclass:: RNGType +.. autoclass:: BootstrapIndexT +.. autoclass:: DateLike +.. autoclass:: Label +.. autoclass:: UnitRootTrend +.. autoclass:: ForecastingMethod diff --git a/doc/source/univariate/forecasting.rst b/doc/source/univariate/forecasting.rst index 0764475a73..87d55f6358 100644 --- a/doc/source/univariate/forecasting.rst +++ b/doc/source/univariate/forecasting.rst @@ -244,3 +244,10 @@ Output Classes ARCHModelForecast ARCHModelForecastSimulation + +.. currentmodule:: arch.univariate.volatility + +.. autosummary:: + :toctree: generated/ + + VarianceForecast diff --git a/doc/source/univariate/volatility.rst b/doc/source/univariate/volatility.rst index 29492bb393..b3bda68573 100644 --- a/doc/source/univariate/volatility.rst +++ b/doc/source/univariate/volatility.rst @@ -56,10 +56,10 @@ all public methods. VolatilityProcess They may optionally expose a -:class:`~arch.univariate.recursions_python.VolatilityUpdater` class +:class:`~arch.univariate.recursions.VolatilityUpdater` class that can be used in :class:`~arch.univariate.ARCHInMean` estimation. -.. currentmodule:: arch.univariate.recursions_python +.. currentmodule:: arch.univariate.recursions ..
autosummary:: :toctree: generated/ diff --git a/examples/unitroot_examples.ipynb b/examples/unitroot_examples.ipynb index 19b59a4cfa..5438d87f11 100644 --- a/examples/unitroot_examples.ipynb +++ b/examples/unitroot_examples.ipynb @@ -26,6 +26,7 @@ "import seaborn\n", "\n", "warnings.simplefilter(\"ignore\")\n", + "\n", "seaborn.set_style(\"darkgrid\")\n", "plt.rc(\"figure\", figsize=(16, 6))\n", "plt.rc(\"savefig\", dpi=90)\n", diff --git a/examples/univariate_volatility_modeling.ipynb b/examples/univariate_volatility_modeling.ipynb index a3f372653a..4b54fb3aa4 100644 --- a/examples/univariate_volatility_modeling.ipynb +++ b/examples/univariate_volatility_modeling.ipynb @@ -24,7 +24,6 @@ }, "outputs": [], "source": [ - "%matplotlib inline\n", "import matplotlib.pyplot as plt\n", "import seaborn\n", "\n",