67 changes: 45 additions & 22 deletions nevergrad/benchmark/experiments.py
@@ -54,28 +54,47 @@
# list_optims = ["QOTPDE", "LQOTPDE", "LQODE"]
# list_optims = ["SPQODE", "SQOPSO", "DiagonalCMA"]
def refactor_optims(x: tp.List[tp.Any]) -> tp.List[tp.Any]: # type: ignore
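# NOTE: only the first uncommented return below ever executes; the later
# returns and if-blocks are dead code kept as alternative optimizer lists.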
return [np.random.choice(["NgLO1","NgLO2"])]
#return ["AX", "PCABO"]
return ["LLAMAAdaptiveHybridDEPSOWithDynamicRestart", "LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation", "LLAMAEnhancedDynamicPrecisionBalancedEvolution", "MNO"]
return [np.random.choice(["DE", "SQOPSO", "Cobyla", "DiscreteLenglerOnePlusOne", "DiscreteOnePlusOne", "AXP", "CauchyRandomSearch", "DSproba", "MetaModel", "LLAMAAdaptiveHybridDEPSOWithDynamicRestart", "LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation", "LLAMAEnhancedDynamicPrecisionBalancedEvolution"])]
if np.random.choice([True, True]):
return [np.random.choice(["NgIohLM", "NGDSRW", "NgIohTuned", "NGOptRW"])]
if False:
return ["NgIohILLM", "NgIohTuned", "PymooBIPOP", "NGOpt"]
# return ["DiscreteLenglerOnePlusOne"]
# return ["OLNDiscreteOnePlusOne"]
# return [np.random.choice([
# "NgLn",
# "SmallLognormalDiscreteOnePlusOne",
# "XLognormalDiscreteOnePlusOne",
# ])]
return [
# "BigLognormalDiscreteOnePlusOne",
# "DiscreteLenglerOnePlusOne",
# "NgLn",
# "SmallLognormalDiscreteOnePlusOne",
# "XLognormalDiscreteOnePlusOne",
"XSmallLognormalDiscreteOnePlusOne",
"MultiLN",
"NgRS",
"NgIohRS",
"NgIohMLn",
"NgIohLn",
# "LognormalDiscreteOnePlusOne",
# "HugeLognormalDiscreteOnePlusOne",
]
#return ["NgIohLLM"]
if True:
optims = [o for o in ng.optimizers.registry.keys() if "LAMA" in o]
# wrap in list() so the declared tp.List return type holds (np.random.choice returns an ndarray)
return list(np.random.choice(
    ["DiagonalCMA", "NgIohLLM", "PymooBIPOP", "DE", "SQOPSO"]
    + ["NgIohTuned"]
    + [o for o in optims if any(x in o for x in [
        "ADEM", "ptiveHarmonySearch", "CMAESDE", "bridDEPSOWithDyn", "CMA",
        "ERADS_Q", "EnhancedDynamicPrec", "hancedFirew", "QPSO",
        "QuantumDifferentialPart",
    ])],
    6,
    replace=False,
))
return ["NgIohLLM"]
# return ["NgIohLama"]
lama = ["DiagonalCMA", "PymooBIPOP", "DE", "SQOPSO"] + (["NgIohTuned"] * 5) + [o for o in list(ng.optimizers.registry.keys()) if "LAMA" in o]
optims = [o for o in ng.optimizers.registry.keys() if "LAMA" in o]
lama = ["DiagonalCMA", "PymooBIPOP", "DE", "SQOPSO"] + ["NgIohTuned"] + [o for o in optims if any([(x in o) for x in ["ADEM", "ptiveHarmonySearch", "CMAESDE","bridDEPSOWithDyn", "CMA","ERADS_Q","EnhancedDynamicPrec","hancedFirew","QPSO","QuantumDifferentialPart"]])]
return list(np.random.choice(lama, 2))
# "BigLognormalDiscreteOnePlusOne",
# "DiscreteLenglerOnePlusOne",
# "NgLn",
# "SmallLognormalDiscreteOnePlusOne",
# "XLognormalDiscreteOnePlusOne",
if False:
return [
"XSmallLognormalDiscreteOnePlusOne",
"MultiLN",
"NgRS",
"NgIohRS",
"NgIohMLn",
"NgIohLn",
# "LognormalDiscreteOnePlusOne",
# "HugeLognormalDiscreteOnePlusOne",
]
# return ["CSEC11"]
# return [np.random.choice(["CSEC11", "SQOPSODCMA", "NgIoh4", "NGOpt"])]
# return ["LPCMA"] #return [np.random.choice(["CSEC10", "DSproba", "NgIoh4", "DSbase", "DS3p", "DSsubspace"])]
@@ -480,7 +499,7 @@ def refactor_optims(x: tp.List[tp.Any]) -> tp.List[tp.Any]: # type: ignore
# return ["DSproba" + str(i) for i in range(2, 10)]
if benchmark in algos: # and np.random.choice([True, False]): # and np.random.randint(2) > 0 and False:
list_algos = algos[benchmark][:5] + [
"CSEC10",
"NgIohLM",
"NGOpt",
"NLOPT_LN_SBPLX",
]
@@ -1691,15 +1710,15 @@ def yabbob(
for name in names
for rotation in [True, False]
for num_blocks in ([1] if not split else [7, 12])
for d in (
for d in (
[100, 1000, 3000]
if hd
else (
[2, 5, 10, 15]
if tuning
else ([40] if bounded else ([2, 3, 5, 10, 15, 20, 50] if noise else [2, 10, 50]))
else ([40] if bounded else ([2, 3, 5, 10, 15, 20, 50] if noise else [2, 5, 10, 50])) # added 5 for lama stuff
)
)
)
]

assert reduction_factor in [1, 7, 13, 17] # needs to be a cofactor
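The nested conditional above chooses the benchmark dimensions; an equivalent flat helper makes the cases easier to read (a sketch only — yabbob keeps the inline expression):

    def yabbob_dimensions(hd: bool, tuning: bool, bounded: bool, noise: bool) -> list:
        # Mirrors the nested conditional in yabbob, in priority order.
        if hd:
            return [100, 1000, 3000]
        if tuning:
            return [2, 5, 10, 15]
        if bounded:
            return [40]
        if noise:
            return [2, 3, 5, 10, 15, 20, 50]
        return [2, 5, 10, 50]  # dimension 5 added by this PR ("for lama stuff")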
Expand Down Expand Up @@ -1767,6 +1786,10 @@ def f(x):
if bounded:
budgets = [10, 20, 40, 100, 300]
optims = refactor_optims(optims)
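# When big is set, subsample a single (function, optimizer) pair so that each
# call yields a small random slice of the full cross-product of experiments.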
if big:
#budgets = [np.random.choice(budgets)]
functions = list(np.random.choice(functions, 1, replace=False))
optims = list(np.random.choice(optims, 1, replace=False))
for optim in optims:
for function in functions:
for budget in budgets:
@@
)
if constraint_case != 0:
xp.function.parametrization.has_constraints = True
if np.random.rand() > 0.25:
continue
if not xp.is_incoherent:
yield xp

@@ -2121,6 +2142,7 @@ def zp_ms_bbob(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:
yield Experiment(function, optim, budget=budget, num_workers=nw, seed=next(seedg))


@registry.register
def nozp_noms_bbob(seed: tp.Optional[int] = None) -> tp.Iterator[Experiment]:
"""Testing optimizers on exponentiated problems.
Cigar, Ellipsoid.
Expand Down Expand Up @@ -3852,7 +3874,8 @@ def lsgo() -> tp.Iterator[Experiment]:
optims = ["DiagonalCMA", "TinyQODE", "OpoDE", "OpoTinyDE"]
optims = ["TinyQODE", "OpoDE", "OpoTinyDE"]
optims = refactor_optims(optims)
for i in range(1, 16): # [np.random.choice(list(range(1, 16)))]:
optims = [np.random.choice(optims)]
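# One optimizer and one of the 15 LSGO functions are now sampled per call,
# instead of sweeping the full range as before.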
for i in [np.random.choice(list(range(1, 16)))]: # [np.random.choice(list(range(1, 16)))]:
for optim in optims:
for budget in [120000, 600000, 3000000]:
yield Experiment(lsgo_makefunction(i).instrumented(), optim, budget=budget)
5 changes: 5 additions & 0 deletions nevergrad/benchmark/gymexperiments.py
@@ -23,6 +23,9 @@ def gym_problem_modifier(specific_problem):


def gym_optimizer_modifier(optims):
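# NOTE: as in refactor_optims, only the first return below executes; the
# remaining returns and the GYM_OPTIMIZER logic are currently unreachable.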
return [np.random.choice(["NgLO1","NgLO2"])]
return ["NgLN"]
return [np.random.choice(["DE", "SQOPSO", "Cobyla", "DiscreteLenglerOnePlusOne", "DiscreteOnePlusOne", "AXP", "CauchyRandomSearch", "DSproba", "MetaModel", "LLAMAAdaptiveHybridDEPSOWithDynamicRestart", "LLAMAEnhancedRefinedHybridDEPSOWithDynamicAdaptation", "LLAMAEnhancedDynamicPrecisionBalancedEvolution"])]
print(optims)
if os.environ.get("GYM_OPTIMIZER") is not None:
optimizer_string = os.environ.get("GYM_OPTIMIZER")
@@ -140,6 +143,7 @@ def ng_full_gym(
optims = ["NgIoh4"]
optims = [np.random.choice(["CSEC11", "SQOPSODCMA", "NgIoh4", "NGOpt"])]
optims = ["CSEC11"]
optims = ["NgIohTuned", "NgIohLM"]
if structured:
optims = get_optimizers("split", seed=next(seedg)) # type: ignore
# optims = [np.random.choice(optims)]
@@ -156,6 +160,7 @@ def ng_full_gym(
optims = ["OLNDiscreteOnePlusOne"]
optims = ["NgIohLn"]
optims = [np.random.choice(["NgIohMLn", "NgIohLn", "NgIoh"])]
optims = [np.random.choice(["NgIohTuned", "NgIohLM", "NGOpt", "PymooBIPOP", "CMA", "SQOPSO"])]
if multi:
controls = ["multi_neural"]
else:
53 changes: 48 additions & 5 deletions nevergrad/benchmark/plotting.py
@@ -136,6 +136,17 @@ def remove_errors(df: pd.DataFrame) -> utils.Selector:
df = utils.Selector(df)
if "error" not in df.columns: # backward compatibility
return df # type: ignore
dropped = []
non_dropped = 0
for index, row in df.iterrows():
    try:
        if np.isnan(row["loss"]):
            pass  # NaN losses are kept here; they are turned into inf below
        non_dropped += 1
    except Exception:  # np.isnan raises on non-numeric losses: drop those rows
        dropped += [index]
print(f"Dropped: {len(dropped)}, Non-dropped: {non_dropped}")
df.drop(dropped, inplace=True)
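# A vectorized sketch of the same filter (not used here): np.isnan raises on
# anything non-numeric, so keeping only real-numbered losses is equivalent, e.g.
#   keep = df["loss"].apply(lambda v: isinstance(v, (int, float, np.floating)))
#   df = df.loc[keep]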
# errors with no recommendation
nandf = df.select(loss=np.isnan)
for row in nandf.itertuples():
@@ -153,7 +164,10 @@ def remove_errors(df: pd.DataFrame) -> utils.Selector:
err_inds = set(nandf.index)
output = df.loc[[i for i in df.index if i not in err_inds], [c for c in df.columns if c != "error"]]
# cast nans in loss to infinity
df.loc[np.isnan(df.loss), "loss"] = float("inf")
try:
df.loc[np.isnan(df.loss), "loss"] = float("inf")
except Exception as e:
print(f"pb with isnan(loss): {e}")
#
assert (
not output.loc[:, "loss"].isnull().values.any()
@@ -276,8 +290,10 @@ def create_plots(
assert (
len(failed_indices) < 100
), f"Fails at row {i+2}, Exceptions: {e1}, {e2}. Failed-indices = {failed_indices}"
df.drop(index=i, inplace=True)
print("We drop index ", i, " for ", col)
#for i in sorted(failed_indices, reverse=True):
# print("We drop index ", i, " for ", col, " out of ", len(df.index), " rows.")
print("Dropping ", failed_indices)
df.drop(df.index[failed_indices], inplace=True)
elif col != "loss":
df[col] = df[col].astype(str)
df[col] = df[col].replace(r"\.[0]*$", "", regex=True)
@@ -374,11 +390,12 @@ def create_plots(
best_algo[i] += ["none"]

# Let us loop over all combinations of variables.
numbers = defaultdict(list)
for case in df.unique(fixed) if fixed else [()]:
print("\n# new case #", fixed, case)
casedf = df.select(**dict(zip(fixed, case)))
data_df = FightPlotter.winrates_from_selection(
casedf, fight_descriptors, num_rows=num_rows, num_cols=350
casedf, fight_descriptors, num_rows=num_rows, num_cols=350, display=(len(fixed) == 1)
)
fplotter = FightPlotter(data_df)
# Competence maps: we find out the best algorithm for each attribute1=valuei/attribute2=valuej.
@@ -391,6 +408,8 @@ def create_plots(
name = compactize(name)
fullname = name

print(f"Number {len([c for c in name if c ==','])} {name}")
numbers[len([c for c in name if c ==","])] += [(len(list(data_df.columns[:])), name)]
if len(name) > 240:
hashcode = hashlib.md5(bytes(name, "utf8")).hexdigest()
name = re.sub(r"\([^()]*\)", "", name)
@@ -420,7 +439,12 @@ def create_plots(
name = "competencemap_" + ",".join("{}".format(x) for x in fixed) + ".tex"
export_table(str(output_folder / name), xindices, yindices, best_algo)
print("Competence map data:", fixed, case, best_algo)

for i in numbers:
print(f"Number = {i}")
if np.std([n[0] for n in numbers[i]]) > 0:
for a, b in sorted(numbers[i]):
if a < np.average([n[0] for n in numbers[i]]) - np.std([n[0] for n in numbers[i]]):
print("WARNING !!!", a, b, " vs average=", np.average([n[0] for n in numbers[i]]))
plt.close("all")
# xp plots: for each experimental setup, we plot curves with budget in x-axis.
# plot mean loss / budget for each optimizer for 1 context
@@ -688,7 +712,19 @@ def make_data(df: pd.DataFrame, normalized_loss: bool = False) -> tp.Dict[str, t
"seed",
}
)
my_descriptors = [
    c for c in df.columns
    if c not in ["pseudotime", "time", "elapsed_time", "loss", "seed"]
]
my_dict = {}
for d in my_descriptors:
    my_dict[d] = 'mean'  # note: built but currently unused
# print("grouping by ", my_descriptors)
# print("len(df) before = ", len(df))
# print("columns before:", df.columns)
df = df.groupby(my_descriptors).agg('mean').reset_index()
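# e.g. two runs agreeing on every descriptor but with losses 1.0 and 3.0
# collapse into a single row with loss 2.0 after this groupby/mean.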
# print("len(df) after = ", len(df))
# print("columns after:", df.columns)
# print(df)
df = normalized_losses(df, descriptors=descriptors)

df = utils.Selector(
df.loc[
:,
@@ -711,6 +747,7 @@
optim_vals[optim]["num_eval"] = num_eval
if "pseudotime" in means.columns:
optim_vals[optim]["pseudotime"] = np.array(means.loc[optim, "pseudotime"])

return optim_vals

@staticmethod
@@ -823,6 +860,7 @@ def winrates_from_selection(
num_rows: int = 5,
num_cols: int = 350,
complete_runs_only: bool = False,
display: bool = False,
) -> pd.DataFrame:
"""Creates a fight plot win rate data out of the given run dataframe,
by iterating over all cases with fixed category variables.
Expand All @@ -842,6 +880,11 @@ def winrates_from_selection(
num_rows = min(num_rows, len(all_optimizers))
# iterate on all sub cases
victories, total = aggregate_winners(df, categories, all_optimizers)
#print(f"Numbers of runs per algorithm: {[int(2 * victories.loc[n, n]) for n in all_optimizers]}")
if display:
to_display = sorted([(2 * victories.loc[n,n], n) for n in all_optimizers], reverse=True)
for num, n in to_display:
print(n, "==>", num, "/", max([int(2 * victories.loc[n, n]) for n in all_optimizers]))
if complete_runs_only:
max_num = max([int(2 * victories.loc[n, n]) for n in all_optimizers])
new_all_optimizers = [n for n in all_optimizers if int(2 * victories.loc[n, n]) == max_num]
75 changes: 75 additions & 0 deletions nevergrad/optimization/lama/AADCCS.py
@@ -0,0 +1,75 @@
import numpy as np


class AADCCS:
def __init__(
self,
budget,
dimension=5,
lower_bound=-5.0,
upper_bound=5.0,
population_size=150,
F_base=0.5,
CR_base=0.8,
learning_rate=0.1,
p=0.25,
):
self.budget = budget
self.dimension = dimension
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.population_size = population_size
self.F_base = F_base # Initial mutation factor
self.CR_base = CR_base # Initial crossover probability
self.learning_rate = learning_rate # Learning rate for adaptive parameters
self.p = p # Probability of using best individual updates

def __call__(self, func):
# Initialize population and fitness
population = np.random.uniform(
self.lower_bound, self.upper_bound, (self.population_size, self.dimension)
)
fitness = np.array([func(ind) for ind in population])
evaluations = self.population_size

best_index = np.argmin(fitness)
best_individual = population[best_index]
best_fitness = fitness[best_index]

# Adaptive mutation and crossover probabilities
F_adaptive = np.full(self.population_size, self.F_base)
CR_adaptive = np.full(self.population_size, self.CR_base)

while evaluations < self.budget:
for i in range(self.population_size):
if evaluations >= self.budget:
break

# Choose four mutually distinct population indices for the mutation
indices = np.random.choice(self.population_size, 4, replace=False)
a, b, c, d = population[indices]

# Mutation with best individual influence
if np.random.rand() < self.p:
a = best_individual # Using best individual to guide mutation

# Differential mutation and crossover
mutant = np.clip(a + F_adaptive[i] * ((b - c) + (a - d)), self.lower_bound, self.upper_bound)
trial = np.where(np.random.rand(self.dimension) < CR_adaptive[i], mutant, population[i])
trial_fitness = func(trial)
evaluations += 1

# Selection and adaptivity update
if trial_fitness < fitness[i]:
population[i], fitness[i] = trial, trial_fitness
if trial_fitness < best_fitness:
best_fitness, best_individual = trial_fitness, trial.copy()
# Successful trial: push F up towards 1.0 and shrink CR
F_adaptive[i] = max(0.1, F_adaptive[i] + self.learning_rate * (1.0 - F_adaptive[i]))
CR_adaptive[i] = min(1.0, CR_adaptive[i] - self.learning_rate * CR_adaptive[i])
else:
# Unsuccessful trial: shrink F and push CR up towards 1.0
F_adaptive[i] = max(0.1, F_adaptive[i] - self.learning_rate * F_adaptive[i])
CR_adaptive[i] = min(1.0, CR_adaptive[i] + self.learning_rate * (1.0 - CR_adaptive[i]))

return best_fitness, best_individual
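A minimal usage sketch for this class (the sphere objective, budget, and printout are illustrative, not part of the file):

    import numpy as np

    def sphere(x: np.ndarray) -> float:
        return float(np.sum(x ** 2))

    optimizer = AADCCS(budget=2000)  # defaults: 5-dimensional box [-5, 5]^5
    best_fitness, best_individual = optimizer(sphere)
    print(best_fitness, best_individual)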