diff --git a/CHANGELOG.md b/CHANGELOG.md
index 04be76b03..aebb5a1ec 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 3.3.1 – 2023-04-04
+
+- Updates for Pandas 2 compatibility
+- Fixed bug in MCMC oversampling and simplified proposal code (#288) (thanks @JiangJQ2000)
+
 ## 3.3 – 2023-03-29
 
 ### General
diff --git a/cobaya/__init__.py b/cobaya/__init__.py
index bb9f065a3..aff75e438 100644
--- a/cobaya/__init__.py
+++ b/cobaya/__init__.py
@@ -13,7 +13,7 @@
     sys.exit(1)
 
 __author__ = "Jesus Torrado and Antony Lewis"
-__version__ = "3.3"
+__version__ = "3.3.1"
 __obsolete__ = False
 __year__ = "2023"
 __url__ = "https://cobaya.readthedocs.io"
diff --git a/cobaya/samplers/mcmc/proposal.py b/cobaya/samplers/mcmc/proposal.py
index b0a45cddf..8d95876cb 100644
--- a/cobaya/samplers/mcmc/proposal.py
+++ b/cobaya/samplers/mcmc/proposal.py
@@ -41,7 +41,7 @@ def __init__(self, n, random_state):
             n = len(n)
         super().__init__(n, random_state)
         if n <= 2:
-            self.indices = list(range(n))
+            self.indices = self.sorted_indices
 
     def next(self):
         """
@@ -66,6 +66,7 @@ def next(self):
 else:
     import warnings
 
+
     def random_SO_N(dim, random_state):
         """
         Draw random samples from SO(N).
@@ -86,10 +87,12 @@ def random_SO_N(dim, random_state):
         _rvs(dim, xx, H)
         return H
 
+
     logging.getLogger('numba').setLevel(logging.ERROR)
     with warnings.catch_warnings():
         warnings.filterwarnings("ignore")
 
+
         @numba.njit("void(int64,float64[::1],float64[:,::1])")
         def _rvs(dim, xx, H):
             D = np.empty((dim,))
@@ -187,18 +190,21 @@ def __init__(self, parameter_blocks, random_state, oversampling_factors=None,
                 "The index given for the last slow block, %d, is not valid: "
                 "there are only %d blocks.",
                 self.i_last_slow_block, len(parameter_blocks))
-        n_all = sum(len(b) for b in parameter_blocks)
-        n_slow = sum(len(b) for b in parameter_blocks[:1 + self.i_last_slow_block])
+        n_block = np.array([len(b) for b in parameter_blocks])
+        n_all = sum(n_block)
+        n_slow = sum(n_block[:1 + self.i_last_slow_block])
         self.nsamples_slow = 0
         self.nsamples_fast = 0
         if set(chain(*parameter_blocks)) != set(range(n_all)):
             raise LoggedError(
                 self.log,
                 "The blocks do not contain all the parameter indices.")
+
+        # Creating the blocked proposers
+        self.proposer = [(RandDirectionProposer(n, random_state) if n > 1
+                          else RandProposer1D(1, random_state)) for n in n_block]
+
         # Prepare indices for the cycler, repeated if there is oversampling
-        self.n_block = np.array([len(b) for b in parameter_blocks])
-        indices_repeated = list(chain(
-            *[list(chain(*[[p] * o for p in block]))
-              for block, o in zip(parameter_blocks, oversampling_factors)]))
+        # Mapping between internal indices, sampler parameter indices and blocks:
         # let i=0,1,... be the indices of the parameters for the sampler,
         # and j=0,1,... be the indices of the parameters as the proposer manages them
@@ -207,25 +213,30 @@ def __init__(self, parameter_blocks, random_state, oversampling_factors=None,
         # iblock is the index of the blocks, which in term of j indices is simply
         # [0,0,1] in this example
         self.i_of_j = np.array(list(chain(*parameter_blocks)))
-        self.iblock_of_j = list(
-            chain(*[[iblock] * len(b) for iblock, b in enumerate(parameter_blocks)]))
-        # Creating the blocked proposers
-        self.proposer = [(RandDirectionProposer(len(b), random_state) if len(b) > 1
-                          else RandProposer1D(1, random_state)) for b in parameter_blocks]
+
+        block_indices = range(len(parameter_blocks))
+
         # Starting j index of each block
-        self.j_start = [len(list(chain(*parameter_blocks[:iblock])))
-                        for iblock, b in enumerate(parameter_blocks)]
+        self.j_start = [sum(n_block[:iblock]) for iblock in block_indices]
+
+        self.par_blocks = [self.i_of_j[j_start:] for j_start in self.j_start]
+
+        # Block cycler,
+        # each block listed proportional to number of times it should be used
+        indices_repeated = np.repeat(block_indices, oversampling_factors * n_block)
+        self.block_cycler = CyclicIndexRandomizer(indices_repeated, random_state)
+
         # Parameter cyclers, cycling over the j's
-        self.parameter_cycler = CyclicIndexRandomizer(indices_repeated, random_state)
         # These ones are used by fast dragging only
-        self.parameter_cycler_slow = CyclicIndexRandomizer(n_slow, random_state)
-        self.parameter_cycler_fast = CyclicIndexRandomizer(n_all - n_slow, random_state)
+        iblock_of_j = np.repeat(block_indices, n_block)
+        self.block_cycler_slow = CyclicIndexRandomizer(iblock_of_j[:n_slow], random_state)
+        self.block_cycler_fast = CyclicIndexRandomizer(iblock_of_j[n_slow:], random_state)
 
     def d(self):
         return len(self.i_of_j)
 
     def get_proposal(self, P):
-        self.current_iblock = self.iblock_of_j[self.parameter_cycler.next()]
+        self.current_iblock = self.block_cycler.next()
         if self.current_iblock <= self.i_last_slow_block:
             self.nsamples_slow += 1
         else:
@@ -233,20 +244,16 @@ def get_proposal(self, P):
         self.get_block_proposal(P, self.current_iblock)
 
     def get_proposal_slow(self, P):
-        current_iblock_slow = self.iblock_of_j[self.parameter_cycler_slow.next()]
         self.nsamples_slow += 1
-        self.get_block_proposal(P, current_iblock_slow)
+        self.get_block_proposal(P, self.block_cycler_slow.next())
 
     def get_proposal_fast(self, P):
         self.nsamples_fast += 1
-        current_iblock_fast = self.iblock_of_j[self.parameter_cycler_slow.n +
-                                               self.parameter_cycler_fast.next()]
-        self.get_block_proposal(P, current_iblock_fast)
+        self.get_block_proposal(P, self.block_cycler_fast.next())
 
     def get_block_proposal(self, P, iblock):
         vec_standardized = self.proposer[iblock].propose_vec(self.proposal_scale)
-        P[self.i_of_j[self.j_start[iblock]:]] += (self.transform[iblock]
-                                                  .dot(vec_standardized))
+        P[self.par_blocks[iblock]] += self.transform[iblock].dot(vec_standardized)
 
     def set_covariance(self, propose_matrix):
         """
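For reference, here is a minimal standalone sketch of the index bookkeeping the patch introduces in `BlockedProposer.__init__`. The block contents and oversampling factors are made up for illustration, and only NumPy is assumed; it is not part of the patch itself.

```python
import numpy as np

# Hypothetical setup: blocks [[1, 2], [0]] in sampler indices (the "i" indices),
# with the second (fast) block oversampled by a factor of 4.
parameter_blocks = [[1, 2], [0]]
oversampling_factors = np.array([1, 4])

n_block = np.array([len(b) for b in parameter_blocks])   # [2, 1]
block_indices = range(len(parameter_blocks))

# i_of_j maps the proposer's internal ordering (the "j" indices) back to sampler indices.
i_of_j = np.array([i for block in parameter_blocks for i in block])   # [1, 2, 0]

# Starting j index of each block, and the parameters a proposal from each block updates.
j_start = [sum(n_block[:iblock]) for iblock in block_indices]   # [0, 2]
par_blocks = [i_of_j[j:] for j in j_start]                      # [array([1, 2, 0]), array([0])]

# The block cycler sees each block index repeated oversampling_factor * block_size times,
# so block 1 is proposed four times for every two proposals of block 0.
indices_repeated = np.repeat(block_indices, oversampling_factors * n_block)
print(indices_repeated)   # [0 0 1 1 1 1]
```

The `np.repeat` construction mirrors the new `indices_repeated` line in the patch, which replaces the nested `chain` calls of the previous version.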