diff --git a/GPflowOpt/acquisition/__init__.py b/GPflowOpt/acquisition/__init__.py
index eca69fe..1b93126 100644
--- a/GPflowOpt/acquisition/__init__.py
+++ b/GPflowOpt/acquisition/__init__.py
@@ -19,6 +19,8 @@
 from .ei import ExpectedImprovement
 from .poi import ProbabilityOfImprovement
 from .lcb import LowerConfidenceBound
+# Batch optimisation
+from .qei_cl import QEI_CL
 
 # Multiobjective
 from .hvpoi import HVProbabilityOfImprovement
diff --git a/GPflowOpt/acquisition/acquisition.py b/GPflowOpt/acquisition/acquisition.py
index 463478b..18c4076 100644
--- a/GPflowOpt/acquisition/acquisition.py
+++ b/GPflowOpt/acquisition/acquisition.py
@@ -75,7 +75,8 @@ class Acquisition(Parameterized):
     objectives.
     """
 
-    def __init__(self, models=[], optimize_restarts=5):
+    def __init__(self, models=[], optimize_restarts=5, batch_size=1):
         """
         :param models: list of GPflow models representing our beliefs about the problem
         :param optimize_restarts: number of optimization restarts to use when training the models
+        :param batch_size: number of points to select per BO iteration (1 = sequential)
@@ -87,6 +88,7 @@ def __init__(self, models=[], optimize_restarts=5):
 
         assert (optimize_restarts >= 0)
         self.optimize_restarts = optimize_restarts
+        self.batch_size = batch_size
         self._needs_setup = True
 
     def _optimize_models(self):
diff --git a/GPflowOpt/acquisition/ei.py b/GPflowOpt/acquisition/ei.py
index 77daf0a..4a9ca2a 100644
--- a/GPflowOpt/acquisition/ei.py
+++ b/GPflowOpt/acquisition/ei.py
@@ -52,11 +52,12 @@ class ExpectedImprovement(Acquisition):
        \\alpha(\\mathbf x_{\\star}) = \\int \\max(f_{\\min} - f_{\\star}, 0) \\, p( f_{\\star}\\,|\\, \\mathbf x, \\mathbf y, \\mathbf x_{\\star} ) \\, d f_{\\star}
     """
 
-    def __init__(self, model):
+    def __init__(self, model, batch_size=1):
         """
         :param model: GPflow model (single output) representing our belief of the objective
+        :param batch_size: number of points to select per BO iteration (1 = sequential)
         """
-        super(ExpectedImprovement, self).__init__(model)
+        super(ExpectedImprovement, self).__init__(model, batch_size=batch_size)
         self.fmin = DataHolder(np.zeros(1))
         self._setup()
 
diff --git a/GPflowOpt/acquisition/qei_cl.py b/GPflowOpt/acquisition/qei_cl.py
new file mode 100644
index 0000000..3cdcdad
--- /dev/null
+++ b/GPflowOpt/acquisition/qei_cl.py
@@ -0,0 +1,62 @@
+# Copyright 2017 Joachim van der Herten
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from .ei import ExpectedImprovement
+import numpy as np
+
+
+class QEI_CL(ExpectedImprovement):
+    """
+    This class is an implementation of the constant liar heuristic (min case)
+    for using Expected Improvement in the batch case.
+
+    See:
+    Ginsbourger D., Le Riche R., Carraro L. (2010)
+    Kriging Is Well-Suited to Parallelize Optimization.
+    """
+
+    def __init__(self, model, batch_size):
+        """
+        :param model: GPflow model (single output) representing our belief of the objective
+        :param batch_size: number of points to select per batch
+        """
+        super(QEI_CL, self).__init__(model, batch_size=batch_size)
+        # Only True while inside the `with` context; guards set_batch.
+        self.in_batch = False
+
+    def set_batch(self, *args):
+        """
+        Append the given batch points to the model data, "lying" that each of
+        them evaluated to the current minimum (constant liar, min case).
+        """
+        assert self.in_batch, 'Set batch must be called within a context'
+
+        X = np.vstack((self.X_,) + args)
+        Y = np.vstack((self.Y_,) + (self.fmin.value,)*len(args))
+        self.set_data(X, Y)
+
+    def __enter__(self):
+        self.in_batch = True
+
+        # Save original dataset of the model
+        self.X_, self.Y_ = np.copy(self.data[0]), np.copy(self.data[1])
+        # Return self so the `with acquisition as acq:` form also works.
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        # Restore original dataset of the model
+        self.set_data(self.X_, self.Y_)
+
+        self.in_batch = False
diff --git a/GPflowOpt/bo.py b/GPflowOpt/bo.py
index 8071104..ecc6764 100644
--- a/GPflowOpt/bo.py
+++ b/GPflowOpt/bo.py
@@ -190,8 +190,21 @@ def inverse_acquisition(x):
 
         # Optimization loop
         for i in range(n_iter):
-            result = self.optimizer.optimize(inverse_acquisition)
-            self._update_model_data(result.x, fx(result.x))
+            if self.acquisition.batch_size > 1:
+                # Select a batch of points with the constant liar heuristic:
+                # after each pick, the acquisition pretends the point already
+                # evaluated to the incumbent minimum before re-optimizing.
+                batch = []
+                with self.acquisition:
+                    for j in range(self.acquisition.batch_size):
+                        result = self.optimizer.optimize(inverse_acquisition)
+                        batch.append(result.x)
+                        self.acquisition.set_batch(*batch)
+
+                batch_array = np.concatenate(batch)
+                self._update_model_data(batch_array, fx(batch_array))
+            else:
+                result = self.optimizer.optimize(inverse_acquisition)
+                self._update_model_data(result.x, fx(result.x))
 
         return self._create_bo_result(True, "OK")
 