diff --git a/CHANGELOG.md b/CHANGELOG.md
index f76ea4a86..f58d0485f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,8 @@

 ## Features

+- [#635](https://github.com/pybop-team/PyBOP/pull/635) - Adds support for multi-proposal evaluation of list-like objects to `BaseCost` classes.
+- [#635](https://github.com/pybop-team/PyBOP/pull/635) - Adds global parameter sensitivity analysis with the method `BaseCost.sensitivity_analysis`. This computation is added to `OptimisationResult` if the optimiser arg `compute_sensitivities` is `True`. An additional arg, `n_sensitivity_samples`, selects the number of samples for the analysis.
 - [#630] (https://github.com/pybop-team/PyBOP/pull/632) - Fisher Information Matrix added to `BaseLikelihood` class.
 - [#619](https://github.com/pybop-team/PyBOP/pull/619) - Adds `pybop.SimulatingAnnealing` optimiser with corresponding tests.
 - [#565](https://github.com/pybop-team/PyBOP/pull/627) - DigiBatt added as funding partner.
diff --git a/examples/scripts/comparison_examples/adamw.py b/examples/scripts/comparison_examples/adamw.py
index 5533af925..cb6bec542 100644
--- a/examples/scripts/comparison_examples/adamw.py
+++ b/examples/scripts/comparison_examples/adamw.py
@@ -13,10 +13,14 @@
     pybop.Parameter(
         "Negative electrode active material volume fraction",
         prior=pybop.Gaussian(0.68, 0.05),
+        initial_value=0.45,
+        bounds=[0.4, 0.9],
     ),
     pybop.Parameter(
         "Positive electrode active material volume fraction",
         prior=pybop.Gaussian(0.58, 0.05),
+        initial_value=0.45,
+        bounds=[0.4, 0.9],
     ),
 )
@@ -52,15 +56,22 @@ def noise(sigma):
 signal = ["Voltage [V]", "Bulk open-circuit voltage [V]"]

 # Generate problem, cost function, and optimisation class
 problem = pybop.FittingProblem(model, parameters, dataset, signal=signal)
-cost = pybop.Minkowski(problem, p=2)
+cost = pybop.SumofPower(problem, p=2.5)
+
 optim = pybop.AdamW(
     cost,
     verbose=True,
     allow_infeasible_solutions=True,
     sigma0=0.02,
     max_iterations=100,
-    max_unchanged_iterations=20,
+    max_unchanged_iterations=45,
+    compute_sensitivities=True,
+    n_sensitivity_samples=128,
 )
+# Reduce the momentum influence
+# for the reduced number of optimiser iterations
+optim.optimiser.b1 = 0.9
+optim.optimiser.b2 = 0.9

 # Run optimisation
 results = optim.run()
diff --git a/examples/scripts/comparison_examples/covariance_matrix_adaptation.py b/examples/scripts/comparison_examples/covariance_matrix_adaptation.py
index dd01dea77..447a63670 100644
--- a/examples/scripts/comparison_examples/covariance_matrix_adaptation.py
+++ b/examples/scripts/comparison_examples/covariance_matrix_adaptation.py
@@ -44,7 +44,9 @@
 # Generate problem, cost function, and optimisation class
 problem = pybop.FittingProblem(model, parameters, dataset, signal=signal)
 cost = pybop.SumSquaredError(problem)
-optim = pybop.CMAES(cost, sigma0=0.25, max_unchanged_iterations=10, max_iterations=40)
+optim = pybop.CMAES(
+    cost, sigma0=0.25, max_unchanged_iterations=10, max_iterations=40, multistart=2
+)

 # Run the optimisation
 results = optim.run()
@@ -54,7 +56,7 @@
 pybop.plot.dataset(dataset)

 # Plot the timeseries output
-pybop.plot.quick(problem, problem_inputs=results.x, title="Optimised Comparison")
+pybop.plot.quick(problem, problem_inputs=results.x_best, title="Optimised Comparison")

 # Plot convergence
 pybop.plot.convergence(optim)
diff --git a/examples/scripts/comparison_examples/scipy_minimize.py b/examples/scripts/comparison_examples/scipy_minimize.py
index 78037f101..4dfb41686 100644
--- a/examples/scripts/comparison_examples/scipy_minimize.py
+++ b/examples/scripts/comparison_examples/scipy_minimize.py
@@ -37,8 +37,15 @@
 # Generate problem, cost function, and optimisation class
 problem = pybop.FittingProblem(model, parameters, dataset)
-cost = pybop.JaxSumSquaredError(problem)
-optim = pybop.SciPyMinimize(cost, max_iterations=100, method="L-BFGS-B", jac=True)
+cost = pybop.SumSquaredError(problem)
+optim = pybop.SciPyMinimize(
+    cost,
+    max_iterations=100,
+    multistart=1,
+    method="L-BFGS-B",
+    jac=True,
+    n_sensitivity_samples=256,
+)

 results = optim.run()
diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py
index 46a19b77c..d2dcd7a8e 100644
--- a/pybop/costs/base_cost.py
+++ b/pybop/costs/base_cost.py
@@ -1,6 +1,8 @@
 from typing import Optional, Union

 import numpy as np
+from SALib.analyze import sobol
+from SALib.sample.sobol import sample

 from pybop import BaseProblem
 from pybop._utils import add_spaces
@@ -61,75 +63,107 @@ def __init__(self, problem: Optional[BaseProblem] = None):

     def __call__(
         self,
-        inputs: Union[Inputs, list],
+        inputs: Union[Inputs, list, np.ndarray],
         calculate_grad: bool = False,
         apply_transform: bool = False,
         for_optimiser: bool = False,
-    ) -> Union[float, tuple[float, np.ndarray]]:
+    ) -> Union[float, list, tuple[float, np.ndarray], list[tuple[float, np.ndarray]]]:
         """
-        This method calls the forward model via problem.evaluate(inputs),
-        and computes the cost for the given output by calling self.compute().
+        Compute cost and optional gradient for given input parameters.

         Parameters
         ----------
-        inputs : Inputs or list-like
-            The input parameters for which the cost and optionally the gradient
-            will be computed.
-        calculate_grad : bool, optional, default=False
+        inputs : Union[Inputs, list, np.ndarray]
+            Input parameters for cost computation. Supports list-like evaluation of
+            multiple input values, shaped [N, M] where N is the number of input positions
+            to evaluate and M is the number of inputs for the underlying model (i.e. parameters).
+        calculate_grad : bool, default=False
            If True, both the cost and gradient will be computed. Otherwise, only the
            cost is computed.
-        apply_transform : bool, optional, default=False
+        apply_transform : bool, default=False
            If True, applies a transformation to the inputs before evaluating the model.
-        for_optimiser : bool, optional, default=False
-            If True, returns the cost value if self.minimising=True and the negative of
-            the cost value if self.minimising=False (i.e. the cost is being maximised).
+        for_optimiser : bool, default=False
+            If True, adjusts output sign based on minimisation/maximisation setting.

         Returns
         -------
-        float or tuple
-            - If `calculate_grad` is False, returns the computed cost (float).
-            - If `calculate_grad` is True, returns a tuple containing the cost (float)
-              and the gradient (np.ndarray).
-
-        Raises
-        ------
-        ValueError
-            If an error occurs during the calculation of the cost.
+        Union[float, list, tuple[float, np.ndarray], list[tuple[float, np.ndarray]]]
+            - Single input, no gradient: float
+            - Multiple inputs, no gradient: list[float]
+            - Single input with gradient: tuple[float, np.ndarray]
+            - Multiple inputs with gradient: list[tuple[float, np.ndarray]]
         """
-        # Note, we use the transformation and parameter properties here to enable
-        # differing attributes within the `LogPosterior` class
-        self.has_transform = self.transformation is not None and apply_transform
-        model_inputs = self.parameters.verify(self._apply_transformations(inputs))
-        self.parameters.update(values=list(model_inputs.values()))
+        # Convert dict to list for sequential computations
+        if isinstance(inputs, dict):
+            inputs = list(inputs.values())
+        input_list = np.atleast_2d(inputs)

-        # Check whether we are maximising or minimising via:
-        # | `minimising` | `self.minimising` | `for_optimiser` |
-        # |--------------|-------------------|-----------------|
-        # | `True`       | `True`            | `True`          |
-        # | `True`       | `True`            | `False`         |
-        # | `False`      | `False`           | `True`          |
-        # | `True`       | `False`           | `False`         |
         minimising = self.minimising or not for_optimiser
+        sign = 1 if minimising else -1

-        y = self.DeferredPrediction
-        dy = self.DeferredPrediction if calculate_grad else None
+        results = []
+        for input_val in input_list:
+            result = self._evaluate_single_input(
+                input_val,
+                calculate_grad=calculate_grad,
+                apply_transform=apply_transform,
+                sign=sign,
+            )
+            results.append(result)
+
+        return results[0] if len(input_list) == 1 else results
+
+    def _evaluate_single_input(
+        self,
+        input_value: Union[Inputs, np.ndarray],
+        calculate_grad: bool,
+        apply_transform: bool,
+        sign: int,
+    ) -> Union[float, tuple[float, np.ndarray]]:
+        """Evaluate cost (and optional gradient) for a single input."""
+        # Setup input transformation
+        self.has_transform = self.transformation is not None and apply_transform
+        model_inputs = self.parameters.verify(self._apply_transformations(input_value))
+        self.parameters.update(values=list(model_inputs.values()))

         if self._has_separable_problem:
-            if calculate_grad:
-                y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict())
-                cost, grad = self.compute(y, dy=dy)
+            return self._evaluate_separable_problem(
+                input_value, calculate_grad=calculate_grad, sign=sign
+            )

-                if self.has_transform and np.isfinite(cost):
-                    jac = self.transformation.jacobian(inputs)
-                    grad = np.matmul(grad, jac)
+        return self._evaluate_non_separable_problem(
+            calculate_grad=calculate_grad, sign=sign
+        )

-                return cost * (1 if minimising else -1), grad * (
-                    1 if minimising else -1
-                )
+    def _evaluate_separable_problem(
+        self, input_value: Union[Inputs, np.ndarray], calculate_grad: bool, sign: int
+    ) -> Union[float, tuple[float, np.ndarray]]:
+        """Evaluation for separable problems."""
+        if calculate_grad:
+            y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict())
+            cost, grad = self.compute(y, dy=dy)
+
+            if self.has_transform and np.isfinite(cost):
+                jac = self.transformation.jacobian(input_value)
+                grad = np.matmul(grad, jac)

-            y = self.problem.evaluate(self.problem.parameters.as_dict())
+            return cost * sign, grad * sign

-        return self.compute(y, dy=dy) * (1 if minimising else -1)
+        y = self.problem.evaluate(self.problem.parameters.as_dict())
+        return self.compute(y, dy=None) * sign
+
+    def _evaluate_non_separable_problem(
+        self, calculate_grad: bool, sign: int
+    ) -> Union[float, tuple[float, np.ndarray]]:
+        """Evaluation for non-separable problems."""
+        y = self.DeferredPrediction
+        dy = self.DeferredPrediction if calculate_grad else None
+
+        if calculate_grad:
+            cost, grad = self.compute(y, dy=dy)
+            return cost * sign, grad * sign
+
+        return self.compute(y, dy=dy) * sign

     def _apply_transformations(self, inputs):
         """Apply transformation if needed"""
@@ -157,6 +191,37 @@ def compute(self, y: dict, dy: Optional[np.ndarray]):
         """
         raise NotImplementedError

+    def sensitivity_analysis(self, n_samples: int = 256):
+        """
+        Computes the parameter sensitivities of the cost function using
+        Sobol analysis from the SALib module [1].
+
+        Parameters
+        ----------
+        n_samples : int, optional
+            Number of samples for the Sobol sensitivity analysis;
+            performs best with powers of 2, e.g. 128, 256, etc.
+
+        References
+        ----------
+        .. [1] Iwanaga, T., Usher, W., & Herman, J. (2022). Toward SALib 2.0:
+               Advancing the accessibility and interpretability of global sensitivity
+               analyses. Socio-Environmental Systems Modelling, 4, 18155. doi:10.18174/sesmo.18155
+
+        Returns
+        -------
+        Sensitivities : dict
+        """
+
+        salib_dict = {
+            "names": self.parameters.keys(),
+            "bounds": self.parameters.get_bounds_for_plotly(),
+            "num_vars": len(self.parameters.keys()),
+        }
+
+        param_values = sample(salib_dict, n_samples)
+        return sobol.analyze(salib_dict, np.asarray(self.__call__(param_values)))
+
     def set_fail_gradient(self, de: float = 1.0):
         """
         Set the fail gradient to a specified value.
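A minimal usage sketch of the two `BaseCost` additions above, assuming a two-parameter `pybop.FittingProblem` built as in the example scripts earlier in this diff; the proposal values and sample count are illustrative, not part of the change.

```python
import numpy as np

import pybop

# Assumes `problem` is a pybop.FittingProblem with two fitted parameters,
# constructed as in the example scripts above.
cost = pybop.SumSquaredError(problem)

# Multi-proposal evaluation: an [N, M] array of N proposals over M parameters
# now returns a list of N cost values instead of a single float.
proposals = np.array([[0.55, 0.55], [0.65, 0.48], [0.7, 0.6]])  # illustrative values
costs = cost(proposals)

# Global sensitivity analysis via SALib's Sobol method; n_samples performs
# best as a power of 2. Returns the SALib dict with "S1", "ST", "S1_conf", etc.
sensitivities = cost.sensitivity_analysis(n_samples=128)
print(sensitivities["ST"])
```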
""" + # Format the sensitivities + self.sense_format = "" + if self._sensitivities: + self.sense_format = "" + for value, conf in zip( + self._sensitivities["ST"], self._sensitivities["ST_conf"] + ): + self.sense_format += f" {value:.3f} ± {conf:.3f}," + return ( f"OptimisationResult:\n" f" Best result from {self.n_runs} run(s).\n" f" Initial parameters: {self.x0_best}\n" f" Optimised parameters: {self.x_best}\n" + f" Total-order sensitivities:{self.sense_format}\n" f" Diagonal Fisher Information entries: {self.fisher_best}\n" f" Final cost: {self.final_cost_best}\n" f" Optimisation time: {self.time_best} seconds\n" @@ -553,14 +582,14 @@ def _get_single_or_all(self, attr): value = getattr(self, attr) return value[0] if len(value) == 1 else value - @property - def x_best(self): - return self._x[self._best_run] if self._best_run is not None else None - @property def x(self): return self._get_single_or_all("_x") + @property + def x_best(self): + return self._x[self._best_run] if self._best_run is not None else None + @property def x0(self): return self._get_single_or_all("_x0") @@ -581,6 +610,14 @@ def final_cost_best(self): def fisher(self): return self._get_single_or_all("_fisher") + @property + def sensitivities(self): + return self._get_single_or_all("_sensitivities") + + @sensitivities.setter + def sensitivities(self, obj: dict): + self._sensitivities = obj + @property def fisher_best(self): return self._fisher[self._best_run] if self._best_run is not None else None diff --git a/pybop/transformation/base_transformation.py b/pybop/transformation/base_transformation.py index a36ba8081..4ddf57191 100644 --- a/pybop/transformation/base_transformation.py +++ b/pybop/transformation/base_transformation.py @@ -17,16 +17,12 @@ class Transformation(ABC): Based on pints.transformation method. References - ---------- + ----------- .. [1] Erik Jorgensen and Asger Roer Pedersen. "How to Obtain Those Nasty Standard Errors From Transformed Data." http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.47.9023 .. [2] Kaare Brandt Petersen and Michael Syskind Pedersen. "The Matrix Cookbook." 2012. 
- """ - # ---- To be implemented with Monte Carlo PR ------ # - # def convert_log_prior(self, log_prior): - # """Returns a transformed log-prior class.""" - # return TransformedLogPrior(log_prior, self) + """ def convert_covariance_matrix(self, cov: np.ndarray, q: np.ndarray) -> np.ndarray: """ diff --git a/pyproject.toml b/pyproject.toml index a82f23398..c3a89fa46 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,10 +26,11 @@ classifiers = [ ] requires-python = ">=3.9, <3.13" dependencies = [ - "pybamm[jax]>=24.9", - "numpy>=1.16, <2.0", - "scipy>=1.3", - "pints>=0.5", + "pybamm[jax]>=24.9", + "numpy>=1.16, <2.0", + "scipy>=1.3", + "pints>=0.5", + "SALib>=1.5", ] [project.optional-dependencies] diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index c46831d3d..9ef7d0701 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -29,10 +29,12 @@ def ground_truth(self): @pytest.fixture def parameters(self): - return pybop.Parameter( - "Negative electrode active material volume fraction", - prior=pybop.Gaussian(0.5, 0.01), - bounds=[0.375, 0.625], + return pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.5, 0.01), + bounds=[0.375, 0.625], + ) ) @pytest.fixture @@ -116,7 +118,8 @@ def test_base(self, model, parameters, dataset): with pytest.raises(NotImplementedError): base_cost([0.5]) - def test_costs(self, cost): + def test_costs(self, cost, parameters): + # Test cost direction if isinstance(cost, pybop.BaseLikelihood): higher_cost = cost([0.52]) lower_cost = cost([0.55]) @@ -129,6 +132,7 @@ def test_costs(self, cost): # Test type of returned value assert np.isscalar(cost([0.5])) + assert np.isscalar(cost(parameters.as_dict())) # Test UserWarnings if isinstance(cost, (pybop.SumSquaredError, pybop.RootMeanSquaredError)): @@ -400,3 +404,20 @@ def test_mixed_problem_classes(self, problem, design_problem): match="All problems must be of the same class type.", ): pybop.WeightedCost(cost1, cost2) + + def test_parameter_sensitivities(self, problem): + cost = pybop.MeanAbsoluteError(problem) + result = cost.sensitivity_analysis(4) + + # Assertions + assert isinstance(result, dict) + assert "S1" in result + assert "ST" in result + assert isinstance(result["S1"], np.ndarray) + assert isinstance(result["S2"], np.ndarray) + assert isinstance(result["ST"], np.ndarray) + assert isinstance(result["S1_conf"], np.ndarray) + assert isinstance(result["ST_conf"], np.ndarray) + assert isinstance(result["S2_conf"], np.ndarray) + assert result["S1"].shape == (1,) + assert result["ST"].shape == (1,) diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index 40052b2c7..43053af28 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -605,6 +605,7 @@ def test_halting(self, cost): f" Best result from {results.n_runs} run(s).\n" f" Initial parameters: {results.x0}\n" f" Optimised parameters: {results.x}\n" + f" Total-order sensitivities:{results.sense_format}\n" f" Diagonal Fisher Information entries: {None}\n" f" Final cost: {results.final_cost}\n" f" Optimisation time: {results.time} seconds\n" @@ -652,6 +653,7 @@ def test_halting(self, cost): f" Best result from {results.n_runs} run(s).\n" f" Initial parameters: {results.x0}\n" f" Optimised parameters: {results.x}\n" + f" Total-order sensitivities:{results.sense_format}\n" f" Diagonal Fisher Information entries: {None}\n" f" Final cost: {results.final_cost}\n" f" Optimisation time: {results.time} seconds\n" @@ 
-728,7 +730,14 @@ def test_optimisation_results(self, cost): pybop.OptimisationResult(optim=optim, x=[1e-5], n_iterations=1) # Test list-like functionality with "best" properties - optim = pybop.Optimisation(cost=cost, n_iterations=1, multistart=3) + optim = pybop.Optimisation( + cost=cost, + x0=[0.6, 0.5], + n_iterations=1, + multistart=3, + compute_sensitivities=True, + n_samples_sensitivity=8, + ) results = optim.run() assert results.pybamm_solution_best in results.pybamm_solution @@ -738,3 +747,4 @@ def test_optimisation_results(self, cost): assert results.n_iterations_best in results.n_iterations assert results.n_evaluations_best in results.n_evaluations assert results.x0_best in results.x0 + assert isinstance(results.sensitivities, dict)
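For context on the assertions in `test_parameter_sensitivities`, a sketch of how the returned SALib Sobol dictionary can be read; the loop and print format are illustrative only.

```python
# result = cost.sensitivity_analysis(4)  # as in the new unit test
# SALib's sobol.analyze returns first-order (S1), total-order (ST) and
# second-order (S2) indices plus confidence intervals, one entry per parameter.
for i, (st, st_conf) in enumerate(zip(result["ST"], result["ST_conf"])):
    print(f"parameter {i}: total-order index {st:.3f} ± {st_conf:.3f}")
```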