From 3a2a44e2edd0e1a73e29adc3a0742919a487fc13 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Mon, 27 Jan 2025 10:14:02 +0000 Subject: [PATCH 1/6] feat: adds global parameter sensitivity analysis, updates BaseCost to accept a list of proposals --- .../scripts/comparison_examples/spm_AdamW.py | 15 +- .../scripts/comparison_examples/spm_CMAES.py | 4 +- .../comparison_examples/spm_scipymin.py | 11 +- pybop/costs/base_cost.py | 134 ++++++++++++------ pybop/optimisers/base_optimiser.py | 49 ++++++- pyproject.toml | 9 +- tests/unit/test_optimisation.py | 2 + 7 files changed, 166 insertions(+), 58 deletions(-) diff --git a/examples/scripts/comparison_examples/spm_AdamW.py b/examples/scripts/comparison_examples/spm_AdamW.py index 5533af92..cb6bec54 100644 --- a/examples/scripts/comparison_examples/spm_AdamW.py +++ b/examples/scripts/comparison_examples/spm_AdamW.py @@ -13,10 +13,14 @@ pybop.Parameter( "Negative electrode active material volume fraction", prior=pybop.Gaussian(0.68, 0.05), + initial_value=0.45, + bounds=[0.4, 0.9], ), pybop.Parameter( "Positive electrode active material volume fraction", prior=pybop.Gaussian(0.58, 0.05), + initial_value=0.45, + bounds=[0.4, 0.9], ), ) @@ -52,15 +56,22 @@ def noise(sigma): signal = ["Voltage [V]", "Bulk open-circuit voltage [V]"] # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset, signal=signal) -cost = pybop.Minkowski(problem, p=2) +cost = pybop.SumofPower(problem, p=2.5) + optim = pybop.AdamW( cost, verbose=True, allow_infeasible_solutions=True, sigma0=0.02, max_iterations=100, - max_unchanged_iterations=20, + max_unchanged_iterations=45, + compute_sensitivities=True, + n_sensitivity_samples=128, ) +# Reduce the momentum influence +# for the reduced number of optimiser iterations +optim.optimiser.b1 = 0.9 +optim.optimiser.b2 = 0.9 # Run optimisation results = optim.run() diff --git a/examples/scripts/comparison_examples/spm_CMAES.py b/examples/scripts/comparison_examples/spm_CMAES.py index dd01dea7..d0906e5d 100644 --- a/examples/scripts/comparison_examples/spm_CMAES.py +++ b/examples/scripts/comparison_examples/spm_CMAES.py @@ -44,7 +44,9 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset, signal=signal) cost = pybop.SumSquaredError(problem) -optim = pybop.CMAES(cost, sigma0=0.25, max_unchanged_iterations=10, max_iterations=40) +optim = pybop.CMAES( + cost, sigma0=0.25, max_unchanged_iterations=10, max_iterations=40, multistart=2 +) # Run the optimisation results = optim.run() diff --git a/examples/scripts/comparison_examples/spm_scipymin.py b/examples/scripts/comparison_examples/spm_scipymin.py index 78037f10..4dfb4168 100644 --- a/examples/scripts/comparison_examples/spm_scipymin.py +++ b/examples/scripts/comparison_examples/spm_scipymin.py @@ -37,8 +37,15 @@ # Generate problem, cost function, and optimisation class problem = pybop.FittingProblem(model, parameters, dataset) -cost = pybop.JaxSumSquaredError(problem) -optim = pybop.SciPyMinimize(cost, max_iterations=100, method="L-BFGS-B", jac=True) +cost = pybop.SumSquaredError(problem) +optim = pybop.SciPyMinimize( + cost, + max_iterations=100, + multistart=1, + method="L-BFGS-B", + jac=True, + n_sensitivity_samples=256, +) results = optim.run() diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 86690b11..2e329114 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -1,6 +1,8 @@ from typing import Optional, Union import 
numpy as np +from SALib.analyze import sobol +from SALib.sample.sobol import sample from pybop import BaseProblem from pybop._utils import add_spaces @@ -61,75 +63,107 @@ def __init__(self, problem: Optional[BaseProblem] = None): def __call__( self, - inputs: Union[Inputs, list], + inputs: Union[Inputs, list, np.ndarray], calculate_grad: bool = False, apply_transform: bool = False, for_optimiser: bool = False, - ) -> Union[float, tuple[float, np.ndarray]]: + ) -> Union[float, list, tuple[float, np.ndarray], list[tuple[float, np.ndarray]]]: """ - This method calls the forward model via problem.evaluate(inputs), - and computes the cost for the given output by calling self.compute(). + Compute cost and optional gradient for given input parameters. Parameters ---------- - inputs : Inputs or list-like - The input parameters for which the cost and optionally the gradient - will be computed. - calculate_grad : bool, optional, default=False + inputs : Union[Inputs, list, np.ndarray] + Input parameters for cost computation. Supports single or multiple inputs. + calculate_grad : bool, default=False If True, both the cost and gradient will be computed. Otherwise, only the cost is computed. - apply_transform : bool, optional, default=False + apply_transform : bool, default=False If True, applies a transformation to the inputs before evaluating the model. - for_optimiser : bool, optional, default=False - If True, returns the cost value if self.minimising=True and the negative of - the cost value if self.minimising=False (i.e. the cost is being maximised). + for_optimiser : bool, default=False + If True, adjusts output sign based on minimisation/maximisation setting. Returns ------- - float or tuple - - If `calculate_grad` is False, returns the computed cost (float). - - If `calculate_grad` is True, returns a tuple containing the cost (float) - and the gradient (np.ndarray). + Union[float, list, tuple[float, np.ndarray], list[tuple[float, np.ndarray]]] + - Single input, no gradient: float + - Multiple inputs, no gradient: list[float] + - Single input with gradient: tuple[float, np.ndarray] + - Multiple inputs with gradient: list[tuple[float, np.ndarray]] + """ + # Convert dict to list for sequential computations + if isinstance(inputs, dict): + inputs = list(inputs.values()) - Raises - ------ - ValueError - If an error occurs during the calculation of the cost. + input_list = np.atleast_2d(inputs) + minimising = self.minimising or not for_optimiser + sign = 1 if minimising else -1 + + results = [] + for input_val in input_list: + result = self._evaluate_single_input( + input_val, + calculate_grad=calculate_grad, + apply_transform=apply_transform, + sign=sign, + ) + results.append(result) + + return results[0] if len(input_list) == 1 else results + + def _evaluate_single_input( + self, + input_value: Union[Inputs, np.ndarray], + calculate_grad: bool, + apply_transform: bool, + sign: int, + ) -> Union[float, tuple[float, np.ndarray]]: + """ + Evaluate cost and optional gradient for a single input. 
""" - # Note, we use the transformation and parameter properties here to enable - # differing attributes within the `LogPosterior` class + # Setup input transformation self.has_transform = self.transformation is not None and apply_transform - model_inputs = self.parameters.verify(self._apply_transformations(inputs)) + model_inputs = self.parameters.verify(self._apply_transformations(input_value)) self.parameters.update(values=list(model_inputs.values())) - # Check whether we are maximising or minimising via: - # | `minimising` | `self.minimising` | `for_optimiser` | - # |--------------|-------------------|-----------------| - # | `True` | `True` | `True` | - # | `True` | `True` | `False` | - # | `False` | `False` | `True` | - # | `True` | `False` | `False` | - minimising = self.minimising or not for_optimiser + if self._has_separable_problem: + return self._evaluate_separable_problem( + input_value, calculate_grad=calculate_grad, sign=sign + ) - y = self.DeferredPrediction - dy = self.DeferredPrediction if calculate_grad else None + return self._evaluate_non_separable_problem( + calculate_grad=calculate_grad, sign=sign + ) - if self._has_separable_problem: - if calculate_grad: - y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) - cost, grad = self.compute(y, dy=dy) + def _evaluate_separable_problem( + self, input_value: Union[Inputs, np.ndarray], calculate_grad: bool, sign: int + ) -> Union[float, tuple[float, np.ndarray]]: + """Evaluation for separable problems.""" + if calculate_grad: + y, dy = self.problem.evaluateS1(self.problem.parameters.as_dict()) + cost, grad = self.compute(y, dy=dy) + + if self.has_transform and np.isfinite(cost): + jac = self.transformation.jacobian(input_value) + grad = np.matmul(grad, jac) - if self.has_transform and np.isfinite(cost): - jac = self.transformation.jacobian(inputs) - grad = np.matmul(grad, jac) + return cost * sign, grad * sign - return cost * (1 if minimising else -1), grad * ( - 1 if minimising else -1 - ) + y = self.problem.evaluate(self.problem.parameters.as_dict()) + return self.compute(y, dy=None) * sign - y = self.problem.evaluate(self.problem.parameters.as_dict()) + def _evaluate_non_separable_problem( + self, calculate_grad: bool, sign: int + ) -> Union[float, tuple[float, np.ndarray]]: + """Evaluation for non-separable problems.""" + y = self.DeferredPrediction + dy = self.DeferredPrediction if calculate_grad else None - return self.compute(y, dy=dy) * (1 if minimising else -1) + if calculate_grad: + cost, grad = self.compute(y, dy=dy) + return cost * sign, grad * sign + + return self.compute(y, dy=dy) * sign def _apply_transformations(self, inputs): """Apply transformation if needed""" @@ -157,6 +191,16 @@ def compute(self, y: dict, dy: Optional[np.ndarray]): """ raise NotImplementedError + def sensitivity_analysis(self, n_samples: int = 256): + salib_dict = { + "names": self.parameters.keys(), + "bounds": self.parameters.get_bounds_for_plotly(), + "num_vars": len(self.parameters.keys()), + } + + param_values = sample(salib_dict, n_samples) + return sobol.analyze(salib_dict, np.asarray(self.__call__(param_values))) + def set_fail_gradient(self, de: float = 1.0): """ Set the fail gradient to a specified value. 
diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index 6dd1a047..7b1aab33 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -145,6 +145,14 @@ def set_base_options(self): # Set multistart self.multistart = self.unset_options.pop("multistart", 1) + # Parameter sensitivities + self.compute_sensitivities = self.unset_options.pop( + "compute_sensitivities", False + ) + self.n_samples_sensitivity = self.unset_options.pop( + "n_sensitivity_samples", 256 + ) + def _set_up_optimiser(self): """ Parse optimiser options and prepare the optimiser. @@ -213,6 +221,9 @@ def run(self): # Store the optimised parameters self.parameters.update(values=self.result.x_best) + # Compute sensitivities + self.result.sensitivities = self._parameter_sensitivities() + if self.verbose: print(self.result) @@ -231,6 +242,16 @@ def _run(self): """ raise NotImplementedError + def _parameter_sensitivities(self): + if not self.compute_sensitivities: + return None + + print("Computing sensitivities...") + sensitivities = self.cost.sensitivity_analysis(self.n_samples_sensitivity) + print("Done.") + + return sensitivities + def log_update(self, x=None, x_best=None, cost=None, cost_best=None, x0=None): """ Update the log with new values. @@ -357,6 +378,7 @@ def __init__( optim: BaseOptimiser, x: Union[Inputs, np.ndarray] = None, final_cost: Optional[float] = None, + sensitivities: Optional[dict] = None, n_iterations: Optional[int] = None, n_evaluations: Optional[int] = None, time: Optional[float] = None, @@ -370,6 +392,7 @@ def __init__( self._best_run = None self._x = [] self._final_cost = [] + self._sensitivities = None self._fisher = [] self._n_iterations = [] self._n_evaluations = [] @@ -499,11 +522,21 @@ def __str__(self) -> str: Returns: str: A formatted string containing optimisation result information. 
""" + # Format the sensitivities + self.sense_format = "" + if self._sensitivities: + self.sense_format = "" + for value, conf in zip( + self._sensitivities["ST"], self._sensitivities["ST_conf"] + ): + self.sense_format += f" {value:.3f} ± {conf:.3f}," + return ( f"OptimisationResult:\n" f" Best result from {self.n_runs} run(s).\n" f" Initial parameters: {self.x0_best}\n" f" Optimised parameters: {self.x_best}\n" + f" Total-order sensitivities:{self.sense_format}\n" f" Diagonal Fisher Information entries: {self.fisher_best}\n" f" Final cost: {self.final_cost_best}\n" f" Optimisation time: {self.time_best} seconds\n" @@ -524,14 +557,14 @@ def _get_single_or_all(self, attr): value = getattr(self, attr) return value[0] if len(value) == 1 else value - @property - def x_best(self): - return self._x[self._best_run] if self._best_run is not None else None - @property def x(self): return self._get_single_or_all("_x") + @property + def x_best(self): + return self._x[self._best_run] if self._best_run is not None else None + @property def x0(self): return self._get_single_or_all("_x0") @@ -552,6 +585,14 @@ def final_cost_best(self): def fisher(self): return self._get_single_or_all("_fisher") + @property + def sensitivities(self): + return self._get_single_or_all("_sensitivities") + + @sensitivities.setter + def sensitivities(self, obj: dict): + self._sensitivities = obj + @property def fisher_best(self): return self._fisher[self._best_run] if self._best_run is not None else None diff --git a/pyproject.toml b/pyproject.toml index a82f2339..c3a89fa4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -26,10 +26,11 @@ classifiers = [ ] requires-python = ">=3.9, <3.13" dependencies = [ - "pybamm[jax]>=24.9", - "numpy>=1.16, <2.0", - "scipy>=1.3", - "pints>=0.5", + "pybamm[jax]>=24.9", + "numpy>=1.16, <2.0", + "scipy>=1.3", + "pints>=0.5", + "SALib>=1.5", ] [project.optional-dependencies] diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py index faea071c..80989cf6 100644 --- a/tests/unit/test_optimisation.py +++ b/tests/unit/test_optimisation.py @@ -592,6 +592,7 @@ def test_halting(self, cost): f" Best result from {results.n_runs} run(s).\n" f" Initial parameters: {results.x0}\n" f" Optimised parameters: {results.x}\n" + f" Total-order sensitivities:{results.sense_format}\n" f" Diagonal Fisher Information entries: {None}\n" f" Final cost: {results.final_cost}\n" f" Optimisation time: {results.time} seconds\n" @@ -638,6 +639,7 @@ def test_halting(self, cost): f" Best result from {results.n_runs} run(s).\n" f" Initial parameters: {results.x0}\n" f" Optimised parameters: {results.x}\n" + f" Total-order sensitivities:{results.sense_format}\n" f" Diagonal Fisher Information entries: {None}\n" f" Final cost: {results.final_cost}\n" f" Optimisation time: {results.time} seconds\n" From 66cc648a78ba8d130f45ce3aa672534b07416836 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Mon, 27 Jan 2025 11:11:25 +0000 Subject: [PATCH 2/6] fix: missed example syntax, adds changelog entry --- CHANGELOG.md | 2 ++ examples/scripts/comparison_examples/spm_CMAES.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ec1c63c8..35574926 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,8 @@ ## Features +- [#635](https://github.com/pybop-team/PyBOP/pull/635) - Adds support for multi-proposal evaluation of list-like objects to `BaseCost` classes. 
+- [#635](https://github.com/pybop-team/PyBOP/pull/635) - Adds global parameter sensitivity analysis with the method `BaseCost.sensitivity_analysis`. This computation is added to `OptimisationResult` if the optimiser arg `compute_sensitivities` is `True`. An additional arg, `n_sensitivity_samples`, selects the number of samples used for the analysis.
 - [#619](https://github.com/pybop-team/PyBOP/pull/619) - Adds `pybop.SimulatingAnnealing` optimiser with corresponding tests.
 - [#565](https://github.com/pybop-team/PyBOP/pull/627) - DigiBatt added as funding partner.

diff --git a/examples/scripts/comparison_examples/spm_CMAES.py b/examples/scripts/comparison_examples/spm_CMAES.py
index d0906e5d..447a6367 100644
--- a/examples/scripts/comparison_examples/spm_CMAES.py
+++ b/examples/scripts/comparison_examples/spm_CMAES.py
@@ -56,7 +56,7 @@
 pybop.plot.dataset(dataset)

 # Plot the timeseries output
-pybop.plot.quick(problem, problem_inputs=results.x, title="Optimised Comparison")
+pybop.plot.quick(problem, problem_inputs=results.x_best, title="Optimised Comparison")

 # Plot convergence
 pybop.plot.convergence(optim)

From f9f729b38af7c689945c2f62764a26eb9dc78afa Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Wed, 29 Jan 2025 17:07:27 +0000
Subject: [PATCH 3/6] tests: adds unit tests for sensitivity analysis

---
 tests/unit/test_cost.py         | 17 +++++++++++++++++
 tests/unit/test_optimisation.py | 10 +++++++++-
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py
index c46831d3..3aded269 100644
--- a/tests/unit/test_cost.py
+++ b/tests/unit/test_cost.py
@@ -400,3 +400,20 @@ def test_mixed_problem_classes(self, problem, design_problem):
             match="All problems must be of the same class type.",
         ):
             pybop.WeightedCost(cost1, cost2)
+
+    def test_parameter_sensitivities(self, problem):
+        cost = pybop.MeanAbsoluteError(problem)
+        result = cost.sensitivity_analysis(4)
+
+        # Assertions
+        assert isinstance(result, dict)
+        assert "S1" in result
+        assert "ST" in result
+        assert isinstance(result["S1"], np.ndarray)
+        assert isinstance(result["S2"], np.ndarray)
+        assert isinstance(result["ST"], np.ndarray)
+        assert isinstance(result["S1_conf"], np.ndarray)
+        assert isinstance(result["ST_conf"], np.ndarray)
+        assert isinstance(result["S2_conf"], np.ndarray)
+        assert result["S1"].shape == (1,)
+        assert result["ST"].shape == (1,)
diff --git a/tests/unit/test_optimisation.py b/tests/unit/test_optimisation.py
index c72dee6f..43053af2 100644
--- a/tests/unit/test_optimisation.py
+++ b/tests/unit/test_optimisation.py
@@ -730,7 +730,14 @@ def test_optimisation_results(self, cost):
         pybop.OptimisationResult(optim=optim, x=[1e-5], n_iterations=1)

         # Test list-like functionality with "best" properties
-        optim = pybop.Optimisation(cost=cost, n_iterations=1, multistart=3)
+        optim = pybop.Optimisation(
+            cost=cost,
+            x0=[0.6, 0.5],
+            n_iterations=1,
+            multistart=3,
+            compute_sensitivities=True,
+            n_samples_sensitivity=8,
+        )
         results = optim.run()

         assert results.pybamm_solution_best in results.pybamm_solution
@@ -740,3 +747,4 @@ def test_optimisation_results(self, cost):
         assert results.n_iterations_best in results.n_iterations
         assert results.n_evaluations_best in results.n_evaluations
         assert results.x0_best in results.x0
+        assert isinstance(results.sensitivities, dict)

From 61ac28c1d7f8e82659a1665284c0b3dc6deb08e7 Mon Sep 17 00:00:00 2001
From: Brady Planden
Date: Wed, 29 Jan 2025 18:13:08 +0000
Subject: [PATCH 4/6] tests: adds explicit dictionary type cost call

---
tests/unit/test_cost.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tests/unit/test_cost.py b/tests/unit/test_cost.py index 3aded269..9ef7d070 100644 --- a/tests/unit/test_cost.py +++ b/tests/unit/test_cost.py @@ -29,10 +29,12 @@ def ground_truth(self): @pytest.fixture def parameters(self): - return pybop.Parameter( - "Negative electrode active material volume fraction", - prior=pybop.Gaussian(0.5, 0.01), - bounds=[0.375, 0.625], + return pybop.Parameters( + pybop.Parameter( + "Negative electrode active material volume fraction", + prior=pybop.Gaussian(0.5, 0.01), + bounds=[0.375, 0.625], + ) ) @pytest.fixture @@ -116,7 +118,8 @@ def test_base(self, model, parameters, dataset): with pytest.raises(NotImplementedError): base_cost([0.5]) - def test_costs(self, cost): + def test_costs(self, cost, parameters): + # Test cost direction if isinstance(cost, pybop.BaseLikelihood): higher_cost = cost([0.52]) lower_cost = cost([0.55]) @@ -129,6 +132,7 @@ def test_costs(self, cost): # Test type of returned value assert np.isscalar(cost([0.5])) + assert np.isscalar(cost(parameters.as_dict())) # Test UserWarnings if isinstance(cost, (pybop.SumSquaredError, pybop.RootMeanSquaredError)): From 327334677b131a400fa6cd9af6cbe1d8408a8cef Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Thu, 30 Jan 2025 10:21:59 +0000 Subject: [PATCH 5/6] docs: update docstrings --- pybop/costs/base_cost.py | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 2e329114..d9bafa49 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -94,8 +94,8 @@ def __call__( # Convert dict to list for sequential computations if isinstance(inputs, dict): inputs = list(inputs.values()) - input_list = np.atleast_2d(inputs) + minimising = self.minimising or not for_optimiser sign = 1 if minimising else -1 @@ -118,9 +118,7 @@ def _evaluate_single_input( apply_transform: bool, sign: int, ) -> Union[float, tuple[float, np.ndarray]]: - """ - Evaluate cost and optional gradient for a single input. - """ + """Evaluate cost (and optional gradient) for a single input.""" # Setup input transformation self.has_transform = self.transformation is not None and apply_transform model_inputs = self.parameters.verify(self._apply_transformations(input_value)) @@ -192,6 +190,20 @@ def compute(self, y: dict, dy: Optional[np.ndarray]): raise NotImplementedError def sensitivity_analysis(self, n_samples: int = 256): + """ + Computes the parameter sensitivities on the cost function. + + Parameters + ---------- + n_samples : int, optional + Number of samples for SOBOL sensitivity analysis, + performs best as order of 2, i.e. 128, 256, etc. 
+ + Returns + ------- + Sensitivities : dict + """ + salib_dict = { "names": self.parameters.keys(), "bounds": self.parameters.get_bounds_for_plotly(), From e1a1d296103d174334311506734cb418e9703264 Mon Sep 17 00:00:00 2001 From: Brady Planden Date: Mon, 3 Feb 2025 14:28:53 +0000 Subject: [PATCH 6/6] fix: suggestion from review --- pybop/costs/base_cost.py | 13 +++++++++++-- pybop/optimisers/base_optimiser.py | 6 +----- pybop/transformation/base_transformation.py | 8 ++------ 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/pybop/costs/base_cost.py b/pybop/costs/base_cost.py index 5dfc6475..d2dcd7a8 100644 --- a/pybop/costs/base_cost.py +++ b/pybop/costs/base_cost.py @@ -74,7 +74,9 @@ def __call__( Parameters ---------- inputs : Union[Inputs, list, np.ndarray] - Input parameters for cost computation. Supports single or multiple inputs. + Input parameters for cost computation. Supports list-like evaluation of + multiple input values, shaped [N,M] where N is the number of input positions + to evaluate and M is the number of inputs for the underlying model (i.e. parameters). calculate_grad : bool, default=False If True, both the cost and gradient will be computed. Otherwise, only the cost is computed. @@ -191,7 +193,8 @@ def compute(self, y: dict, dy: Optional[np.ndarray]): def sensitivity_analysis(self, n_samples: int = 256): """ - Computes the parameter sensitivities on the cost function. + Computes the parameter sensitivities on the cost function using + SOBOL analyse from the SALib module [1]. Parameters ---------- @@ -199,6 +202,12 @@ def sensitivity_analysis(self, n_samples: int = 256): Number of samples for SOBOL sensitivity analysis, performs best as order of 2, i.e. 128, 256, etc. + References + ---------- + .. [1] Iwanaga, T., Usher, W., & Herman, J. (2022). Toward SALib 2.0: + Advancing the accessibility and interpretability of global sensitivity + analyses. Socio-Environmental Systems Modelling, 4, 18155. doi:10.18174/sesmo.18155 + Returns ------- Sensitivities : dict diff --git a/pybop/optimisers/base_optimiser.py b/pybop/optimisers/base_optimiser.py index b6cac54a..3aa1bd55 100644 --- a/pybop/optimisers/base_optimiser.py +++ b/pybop/optimisers/base_optimiser.py @@ -247,11 +247,7 @@ def _parameter_sensitivities(self): if not self.compute_sensitivities: return None - print("Computing sensitivities...") - sensitivities = self.cost.sensitivity_analysis(self.n_samples_sensitivity) - print("Done.") - - return sensitivities + return self.cost.sensitivity_analysis(self.n_samples_sensitivity) def log_update(self, x=None, x_best=None, cost=None, cost_best=None, x0=None): """ diff --git a/pybop/transformation/base_transformation.py b/pybop/transformation/base_transformation.py index a36ba808..4ddf5719 100644 --- a/pybop/transformation/base_transformation.py +++ b/pybop/transformation/base_transformation.py @@ -17,16 +17,12 @@ class Transformation(ABC): Based on pints.transformation method. References - ---------- + ----------- .. [1] Erik Jorgensen and Asger Roer Pedersen. "How to Obtain Those Nasty Standard Errors From Transformed Data." http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.47.9023 .. [2] Kaare Brandt Petersen and Michael Syskind Pedersen. "The Matrix Cookbook." 2012. 
- """ - # ---- To be implemented with Monte Carlo PR ------ # - # def convert_log_prior(self, log_prior): - # """Returns a transformed log-prior class.""" - # return TransformedLogPrior(log_prior, self) + """ def convert_covariance_matrix(self, cov: np.ndarray, q: np.ndarray) -> np.ndarray: """