From af99badeeca3abbc82d5c9c8ae7c0c5c090d42e8 Mon Sep 17 00:00:00 2001 From: Thomas Morris Date: Thu, 21 Nov 2024 00:10:43 -0500 Subject: [PATCH 1/5] refactored constraints --- .pre-commit-config.yaml | 10 +- src/blop/agent.py | 81 ++++++++++------ src/blop/bayesian/models.py | 2 + src/blop/objectives.py | 182 +++++++++++++++++++----------------- src/blop/plotting.py | 18 ++-- src/blop/tests/conftest.py | 23 ++++- 6 files changed, 180 insertions(+), 136 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8e6ebed..59b5990 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -2,28 +2,28 @@ default_language_version: python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v5.0.0 hooks: - id: check-yaml - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/ambv/black - rev: 23.1.0 + rev: 24.10.0 hooks: - id: black language_version: python3 - id: black-jupyter language_version: python3 - repo: https://github.com/pycqa/flake8 - rev: 6.0.0 + rev: 7.1.1 hooks: - id: flake8 - repo: https://github.com/pycqa/isort - rev: 5.12.0 + rev: 5.13.2 hooks: - id: isort args: ["--profile", "black"] - repo: https://github.com/kynan/nbstripout - rev: 0.6.1 + rev: 0.8.1 hooks: - id: nbstripout diff --git a/src/blop/agent.py b/src/blop/agent.py index f595968..4e95546 100644 --- a/src/blop/agent.py +++ b/src/blop/agent.py @@ -17,8 +17,6 @@ import pandas as pd import scipy as sp import torch - -# from botorch.utils.transforms import normalize from botorch.acquisition.objective import ScalarizedPosteriorTransform from botorch.models.deterministic import GenericDeterministicModel from botorch.models.model_list_gp_regression import ModelListGP @@ -184,7 +182,7 @@ def refresh(self): def redigest(self): self._table = self.digestion(self._table, **self.digestion_kwargs) - def sample(self, n: int = DEFAULT_MAX_SAMPLES, method: str = "quasi-random") -> torch.Tensor: + def sample(self, n: int = DEFAULT_MAX_SAMPLES, normalize: bool = False, method: str = "quasi-random") -> torch.Tensor: """ Returns a (..., 1, n_active_dofs) tensor of points sampled within the parameter space. @@ -214,7 +212,7 @@ def sample(self, n: int = DEFAULT_MAX_SAMPLES, method: str = "quasi-random") -> else: raise ValueError("'method' argument must be one of ['quasi-random', 'random', 'grid'].") - return self.dofs(active=True).untransform(X).double() + return X.double() if normalize else self.dofs(active=True).untransform(X).double() def ask(self, acqf="qei", n=1, route=True, sequential=True, upsample=1, **acqf_kwargs): """Ask the agent for the best point to sample, given an acquisition function. 
@@ -382,7 +380,7 @@ def tell( t0 = ttime.monotonic() train_model(obj.model) if self.verbose: - print(f"trained model '{obj.name}' in {1e3*(ttime.monotonic() - t0):.00f} ms") + print(f"trained model '{obj.name}' in {1e3 * (ttime.monotonic() - t0):.00f} ms") else: train_model(obj.model, hypers=cached_hypers) @@ -585,31 +583,54 @@ def posterior(self, x): @property def fitness_model(self): - active_fitness_models = self.objectives(active=True, kind="fitness") - if len(active_fitness_models) == 0: - return GenericDeterministicModel(f=lambda x: torch.ones(x.shape[:-1]).unsqueeze(-1)) - if len(active_fitness_models) == 1: - return active_fitness_models[0].model - return ModelListGP(*[obj.model for obj in active_fitness_models]) + active_fitness_objectives = self.objectives(active=True, fitness=True) + if len(active_fitness_objectives) == 0: + # A dummy model that outputs noise, for when there are only constraints. + dummy_X = self.sample(n=256, normalize=True).squeeze(-2) + dummy_Y = torch.rand(size=(*dummy_X.shape[:-1], 1), dtype=torch.double) + return construct_single_task_model(X=dummy_X, y=dummy_Y, min_noise=1e2, max_noise=2e2) + if len(active_fitness_objectives) == 1: + return active_fitness_objectives[0].model + return ModelListGP(*[obj.model for obj in active_fitness_objectives]) + + # @property + # def pseudofitness_model(self): + # """ + # In the case that we have all constraints, there is no fitness model. In that case, + # we replace the fitness model with a + # """ + # active_fitness_objectives = self.objectives(active=True, fitness=True) + # if len(active_fitness_objectives) == 0: + # # A dummy model that outputs all ones, for when there are only constraints. + # dummy_X = self.sample(n=256, normalize=True).squeeze(-2) + # dummy_Y = torch.ones(size=(*dummy_X.shape[:-1], 1), dtype=torch.double) + # return construct_single_task_model(X=dummy_X, y=dummy_Y) + # if len(active_fitness_objectives) == 1: + # return active_fitness_objectives[0].model + # return ModelListGP(*[obj.model for obj in active_fitness_objectives]) @property def evaluated_constraints(self): - constraint_objectives = self.objectives(kind="constraint") + constraint_objectives = self.objectives(constraint=True) raw_targets_dict = self.raw_targets() if len(constraint_objectives): - return torch.cat([obj.constrain(raw_targets_dict[obj.name]) for obj in constraint_objectives], dim=-1) + return torch.cat( + [obj.constrain(raw_targets_dict[obj.name]).unsqueeze(-1) for obj in constraint_objectives], dim=-1 + ) else: return torch.ones(size=(len(self._table), 0), dtype=torch.bool) def fitness_scalarization(self, weights="default"): - fitness_objectives = self.objectives(active=True, kind="fitness") + active_fitness_objectives = self.objectives(active=True, fitness=True) + if len(active_fitness_objectives) == 0: + return ScalarizedPosteriorTransform(weights=torch.tensor([1.0], dtype=torch.double)) if weights == "default": - weights = torch.tensor([obj.weight for obj in fitness_objectives], dtype=torch.double) + weights = torch.tensor([obj.weight for obj in active_fitness_objectives], dtype=torch.double) elif weights == "equal": - weights = torch.ones(len(fitness_objectives), dtype=torch.double) + weights = torch.ones(len(active_fitness_objectives), dtype=torch.double) elif weights == "random": - weights = torch.rand(len(fitness_objectives), dtype=torch.double) - weights *= len(fitness_objectives) / weights.sum() + weights = torch.rand(len(active_fitness_objectives), dtype=torch.double) + weights *= len(active_fitness_objectives) 
/ weights.sum() elif not isinstance(weights, torch.Tensor): raise ValueError(f"'weights' must be a Tensor or one of ['default', 'equal', 'random'], and not {weights}.") return ScalarizedPosteriorTransform(weights=weights) @@ -620,10 +641,10 @@ def scalarized_fitnesses(self, weights="default", constrained=True): If constrained=True, the points that satisfy the most constraints are automatically better than the others. """ - fitness_objs = self.objectives(kind="fitness") + fitness_objs = self.objectives(fitness=True) if len(fitness_objs) >= 1: f = self.fitness_scalarization(weights=weights).evaluate( - self.train_targets(active=True, kind="fitness", concatenate=True) + self.train_targets(active=True, fitness=True, concatenate=True) ) f = torch.where(f.isnan(), -np.inf, f) # remove all nans else: @@ -646,7 +667,7 @@ def pareto_mask(self): Returns a mask of all points that satisfy all constraints and are Pareto efficient. A point is Pareto efficient if it is there is no other point that is better at every objective. """ - Y = self.train_targets(active=True, kind="fitness", concatenate=True) + Y = self.train_targets(active=True, fitness=True, concatenate=True) # nuke the bad points Y[~self.evaluated_constraints.all(axis=-1)] = -np.inf @@ -669,7 +690,7 @@ def min_ref_point(self): @property def random_ref_point(self): - return self.train_targets(active=True, kind="fitness", concatenate=True)[self.argmax_best_f(weights="random")] + return self.train_targets(active=True, fitness=True, concatenate=True)[self.argmax_best_f(weights="random")] @property def all_objectives_valid(self): @@ -747,9 +768,7 @@ def _get_acquisition_function(self, identifier, return_metadata=False): found in `agent.all_acqfs`. """ - acquisition._construct_acqf(self, identifier=identifier, return_metadata=return_metadata) - - return + return acquisition._construct_acqf(self, identifier=identifier, return_metadata=return_metadata) def _latent_dim_tuples(self, obj_index=None): """ @@ -779,7 +798,7 @@ def sample_domain(self): Read-only DOFs are set to exactly their last known value. Discrete DOFs are relaxed to some continuous domain. 
""" - return self.dofs(active=True).transform(self.dofs(active=True).search_domain.T) + return self.dofs(active=True).transform(self.dofs(active=True).search_domain.T).clone() @property def input_normalization(self): @@ -842,9 +861,9 @@ def _set_hypers(self, hypers): def constraint(self, x): p = torch.ones(x.shape[:-1]) for obj in self.objectives(active=True): - # if the targeting constraint is non-trivial - # if obj.kind == "constraint": - # p *= obj.targeting_constraint(x) + # if the constraint is non-trivial + if obj.constraint is not None: + p *= obj.constraint_probability(x) # if the validity constaint is non-trivial if obj.validity_conjugate_model is not None: p *= obj.validity_constraint(x) @@ -1009,9 +1028,9 @@ def plot_objectives(self, axes: Tuple = (0, 1), **kwargs): """ if len(self.dofs(active=True, read_only=False)) == 1: - if len(self.objectives(active=True, kind="fitness")) > 0: + if len(self.objectives(active=True, fitness=True)) > 0: plotting._plot_fitness_objs_one_dof(self, **kwargs) - if len(self.objectives(active=True, kind="constraint")) > 0: + if len(self.objectives(active=True, constraint=True)) > 0: plotting._plot_constraint_objs_one_dof(self, **kwargs) else: plotting._plot_objs_many_dofs(self, axes=axes, **kwargs) diff --git a/src/blop/bayesian/models.py b/src/blop/bayesian/models.py index 7ee3faf..c125a1d 100644 --- a/src/blop/bayesian/models.py +++ b/src/blop/bayesian/models.py @@ -20,6 +20,8 @@ def construct_single_task_model(X, y, skew_dims=None, min_noise=1e-6, max_noise= Construct an untrained model for an objective. """ + skew_dims = skew_dims if skew_dims is not None else [(i,) for i in range(X.shape[-1])] + likelihood = gpytorch.likelihoods.GaussianLikelihood( noise_constraint=gpytorch.constraints.Interval( torch.tensor(min_noise), diff --git a/src/blop/objectives.py b/src/blop/objectives.py index 54c8bd1..f8a3dd6 100644 --- a/src/blop/objectives.py +++ b/src/blop/objectives.py @@ -1,6 +1,4 @@ from collections.abc import Iterable, Sequence -from dataclasses import dataclass, field -from typing import List, Tuple, Union import numpy as np import pandas as pd @@ -12,21 +10,21 @@ DEFAULT_MAX_NOISE_LEVEL = 1e0 OBJ_FIELD_TYPES = { - "name": "str", - "description": "object", - "type": "str", - "kind": "str", - "target": "object", - "transform": "str", - "domain": "str", - "trust_domain": "object", - "weight": "float", - "units": "object", - "noise_bounds": "object", - "noise": "float", - "n_valid": "int", - "latent_groups": "object", - "active": "bool", + "name": str, + "description": object, + "active": bool, + "type": str, + "units": object, + "target": object, + "constraint": object, + "transform": str, + "domain": str, + "trust_domain": object, + "weight": float, + "noise_bounds": object, + "noise": float, + "n_valid": int, + "latent_groups": object, } SUPPORTED_OBJ_TYPES = ["continuous", "binary", "ordinal", "categorical"] @@ -34,16 +32,13 @@ class DuplicateNameError(ValueError): - ... + pass domains = {"log"} def _validate_obj_transform(transform): - if transform is None: - return (-np.inf, np.inf) - if transform not in TRANSFORM_DOMAINS: raise ValueError(f"'transform' must be a callable with one argument, or one of {TRANSFORM_DOMAINS}") @@ -61,7 +56,6 @@ def _validate_continuous_domains(trust_domain, domain): raise ValueError(f"The trust domain {trust_domain} is outside the transform domain {domain}.") -@dataclass class Objective: """An objective to be used by an agent. @@ -93,34 +87,55 @@ class Objective: DOFs will be modeled independently. 
""" - name: str - description: str = "" - type: str = "continuous" - target: Union[Tuple[float, float], float, str] = "max" - transform: str = None - weight: float = 1.0 - active: bool = True - trust_domain: Tuple[float, float] or None = None - min_noise: float = DEFAULT_MIN_NOISE_LEVEL - max_noise: float = DEFAULT_MAX_NOISE_LEVEL - units: str = None - latent_groups: List[Tuple[str, ...]] = field(default_factory=list) - - def __post_init__(self): - if self.transform is not None: - _validate_obj_transform(self.transform) + def __init__( + self, + name: str, + description: str = "", + type: str = "continuous", + target: float | str | None = None, + constraint: tuple[float, float] | set | None = None, + transform: str = None, + weight: float = 1.0, + active: bool = True, + trust_domain: tuple[float, float] | None = None, + min_noise: float = DEFAULT_MIN_NOISE_LEVEL, + max_noise: float = DEFAULT_MAX_NOISE_LEVEL, + units: str = None, + latent_groups: list[tuple[str, ...]] = {}, + ): + self.name = name + self.units = units + self.description = description + self.type = type + self.active = active + + if (target is None) and (constraint is None): + raise ValueError("You must supply either a 'target' or a 'constraint'.") + + self.target = target + self.constraint = constraint + + if transform is not None: + _validate_obj_transform(transform) + + self.transform = transform + + if self.type == "continuous": + _validate_continuous_domains(trust_domain, self.domain) + else: + raise NotImplementedError() + + self.trust_domain = trust_domain + self.weight = weight if target is not None else None + self.min_noise = min_noise + self.max_noise = max_noise + self.latent_groups = latent_groups if isinstance(self.target, str): # eventually we will be able to target other strings, as outputs of a discrete objective if self.target not in ["min", "max"]: raise ValueError("'target' must be either 'min', 'max', a number, or a tuple of numbers.") - self.use_as_constraint = True if isinstance(self.target, tuple) else False - - @property - def kind(self): - return "fitness" if self.target in ["min", "max"] else "constraint" - @property def domain(self): """ @@ -132,12 +147,12 @@ def domain(self): return TRANSFORM_DOMAINS[self.transform] def constrain(self, y): - """ - The total domain of the objective. 
- """ - if self.kind != "constraint": + if self.constraint is None: raise RuntimeError("Cannot call 'constrain' with a non-constraint objective.") - return (y > self.target[0]) & (y < self.target[1]) + elif isinstance(self.constraint, tuple): + return (y > self.constraint[0]) & (y < self.constraint[1]) + else: + return np.array([value in self.constraint for value in np.atleast_1d(y)]) @property def _trust_domain(self): @@ -189,7 +204,7 @@ def noise_bounds(self) -> tuple: @property def summary(self) -> pd.Series: - series = pd.Series(index=list(OBJ_FIELD_TYPES.keys()), dtype="object") + series = pd.Series(index=list(OBJ_FIELD_TYPES.keys()), dtype=object) for attr in series.index: value = getattr(self, attr) @@ -217,42 +232,34 @@ def snr(self) -> float: def n_valid(self) -> int: return int((~self.model.train_targets.isnan()).sum()) if hasattr(self, "model") else 0 - def targeting_constraint(self, x: torch.Tensor) -> torch.Tensor: - if not isinstance(self.target, tuple): - return None + def constraint_probability(self, x: torch.Tensor) -> torch.Tensor: + if self.constraint is None: + raise RuntimeError("Cannot call 'constrain' with a non-constraint objective.") - a, b = self.target + a, b = self.constraint p = self.model.posterior(x) m = p.mean s = p.variance.sqrt() sish = s + 0.1 * m.std() # for numerical stability - return ( + p = ( 0.5 * (approximate_erf((b - m) / (np.sqrt(2) * sish)) - approximate_erf((a - m) / (np.sqrt(2) * sish)))[..., -1] - ) + ) # noqa - @property - def is_fitness(self): - return self.target in ["min", "max"] - - def value_prediction(self, X): - p = self.model.posterior(X) - - if self.is_fitness: - return self.fitness_inverse(p.mean) + return p.detach() - if isinstance(self.target, tuple): - return p.mean - - def fitness_prediction(self, X): - p = self.model.posterior(X) + def pseudofitness(self, x: torch.tensor) -> torch.tensor: + """ + When the optimization problem consists only of constraints, the + """ + p = self.model.posterior(x) if self.is_fitness: return self.fitness_inverse(p.mean) if isinstance(self.target, tuple): - return self.targeting_constraint(X).log().clamp(min=-16) + return self.constraint_probability(x).log().clamp(min=-16) @property def model(self): @@ -273,7 +280,7 @@ def names(self): def __getattr__(self, attr): # This is called if we can't find the attribute in the normal way. 
if all([hasattr(obj, attr) for obj in self.objectives]): - if OBJ_FIELD_TYPES.get(attr) in ["float", "int", "bool"]: + if OBJ_FIELD_TYPES.get(attr) in [float, "int", "bool"]: return np.array([getattr(obj, attr) for obj in self.objectives]) return [getattr(obj, attr) for obj in self.objectives] if attr in self.names: @@ -300,38 +307,41 @@ def __len__(self): @property def summary(self) -> pd.DataFrame: - table = pd.DataFrame(columns=list(OBJ_FIELD_TYPES.keys()), index=np.arange(len(self))) + # table = pd.DataFrame(columns=list(OBJ_FIELD_TYPES.keys()), index=np.arange(len(self))) - for index, obj in enumerate(self.objectives): - for attr, value in obj.summary.items(): - table.at[index, attr] = value + # for index, obj in enumerate(self.objectives): + # for attr, value in obj.summary.items(): + # table.at[index, attr] = value - for attr, dtype in OBJ_FIELD_TYPES.items(): - table[attr] = table[attr].astype(dtype) + # for attr, dtype in OBJ_FIELD_TYPES.items(): + # table[attr] = table[attr].astype(dtype) - return table + return pd.concat([objective.summary for objective in self.objectives], axis=1) def __repr__(self): - return self.summary.T.__repr__() + return self.summary.__repr__() def _repr_html_(self): - return self.summary.T._repr_html_() + return self.summary._repr_html_() def add(self, objective): self.objectives.append(objective) @staticmethod - def _test_obj(obj, active=None, kind=None): + def _test_obj(obj, active=None, fitness=None, constraint=None): if active is not None: if obj.active != active: return False - if kind is not None: - if obj.kind != kind: + if fitness is not None: + if obj.target is None: + return False + if constraint is not None: + if obj.constraint is None: return False return True - def subset(self, active=None, kind=None): - return ObjectiveList([obj for obj in self.objectives if self._test_obj(obj, active=active, kind=kind)]) + def subset(self, **kwargs): + return ObjectiveList([obj for obj in self.objectives if self._test_obj(obj, **kwargs)]) def transform(self, Y): """ diff --git a/src/blop/plotting.py b/src/blop/plotting.py index 91b7ca7..39fb5bd 100644 --- a/src/blop/plotting.py +++ b/src/blop/plotting.py @@ -14,7 +14,7 @@ def _plot_fitness_objs_one_dof(agent, size=16, lw=1e0): - fitness_objs = agent.objectives(kind="fitness") + fitness_objs = agent.objectives(fitness=True) agent.obj_fig, agent.obj_axes = plt.subplots( len(fitness_objs), @@ -97,7 +97,7 @@ def _plot_constraint_objs_one_dof(agent, size=16, lw=1e0): val_ax.scatter(x_values, obj_values, s=size, color=color) - con_ax.plot(test_x, obj.targeting_constraint(test_model_inputs).detach()) + con_ax.plot(test_x, obj.constraint_probability(test_model_inputs).detach()) for z in [0, 1, 2]: val_ax.fill_between( @@ -180,8 +180,8 @@ def _plot_objs_many_dofs(agent, axes=(0, 1), shading="nearest", cmap=DEFAULT_COL # test_values = obj.fitness_inverse(test_mean) if obj.kind == "fitness" else test_mean test_constraint = None - if not obj.kind == "fitness": - test_constraint = obj.targeting_constraint(model_inputs).detach().squeeze().numpy() + if obj.constraint is not None: + test_constraint = obj.constraint_probability(model_inputs).detach().squeeze().numpy() if gridded: # _ = agent.obj_axes[obj_index, 1].pcolormesh( @@ -192,7 +192,7 @@ def _plot_objs_many_dofs(agent, axes=(0, 1), shading="nearest", cmap=DEFAULT_COL # cmap=cmap, # norm=val_norm, # ) - if obj.kind == "fitness": + if obj.constraint is not None: fitness_ax = agent.obj_axes[obj_index, 1].pcolormesh( test_x, test_y, @@ -229,7 +229,7 @@ def 
_plot_objs_many_dofs(agent, axes=(0, 1), shading="nearest", cmap=DEFAULT_COL # norm=val_norm, # cmap=cmap, # ) - if obj.kind == "fitness": + if obj.constraint is not None: fitness_ax = agent.obj_axes[obj_index, 1].scatter( test_x, test_y, @@ -260,7 +260,7 @@ def _plot_objs_many_dofs(agent, axes=(0, 1), shading="nearest", cmap=DEFAULT_COL val_cbar = agent.obj_fig.colorbar(val_ax, ax=agent.obj_axes[obj_index, 0], location="bottom", aspect=32, shrink=0.8) val_cbar.set_label(f"{obj.units or ''}") - if obj.kind == "fitness": + if obj.constraint is not None: _ = agent.obj_fig.colorbar(fitness_ax, ax=agent.obj_axes[obj_index, 1], location="bottom", aspect=32, shrink=0.8) _ = agent.obj_fig.colorbar(fit_err_ax, ax=agent.obj_axes[obj_index, 2], location="bottom", aspect=32, shrink=0.8) @@ -539,7 +539,7 @@ def inspect_beam(agent, index, border=None): def _plot_pareto_front(agent, obj_indices=(0, 1)): - f_objs = agent.objectives(kind="fitness") + f_objs = agent.objectives(fitness=True) (i, j) = obj_indices if len(f_objs) < 2: @@ -547,7 +547,7 @@ def _plot_pareto_front(agent, obj_indices=(0, 1)): fig, ax = plt.subplots(1, 1, figsize=(6, 6)) - y = agent.train_targets(kind="fitness", concatenate=True) + y = agent.train_targets(fitness=True, concatenate=True) pareto_mask = agent.pareto_mask constraint = agent.evaluated_constraints.all(axis=-1) diff --git a/src/blop/tests/conftest.py b/src/blop/tests/conftest.py index 84acb41..8c3c9a2 100644 --- a/src/blop/tests/conftest.py +++ b/src/blop/tests/conftest.py @@ -76,7 +76,7 @@ def get_agent(param): elif param == "1d_1c": return Agent( dofs=DOF(description="The first DOF", name="x1", search_domain=(-5.0, 5.0)), - objectives=Objective(description="Himmelblau’s function", name="himmelblau", target=(95, 105)), + objectives=Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), digestion=sketchy_himmelblau_digestion, ) @@ -90,6 +90,19 @@ def get_agent(param): digestion=sketchy_himmelblau_digestion, ) + elif param == "2d_2c": + return Agent( + dofs=[ + DOF(description="The first DOF", name="x1", search_domain=(-5.0, 5.0)), + DOF(description="The first DOF", name="x2", search_domain=(-5.0, 5.0)), + ], + objectives=[ + Objective(description="Himmelblau’s function", name="himmelblau", target="min"), + Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), + ], + digestion=sketchy_himmelblau_digestion, + ) + elif param == "2d_1f_1c": return Agent( dofs=[ @@ -98,7 +111,7 @@ def get_agent(param): ], objectives=[ Objective(description="Himmelblau’s function", name="himmelblau", target="min"), - Objective(description="Himmelblau’s function", name="himmelblau", target=(95, 105)), + Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), ], digestion=sketchy_himmelblau_digestion, ) @@ -112,8 +125,8 @@ def get_agent(param): objectives=[ Objective(description="f1", name="f1", target="min"), Objective(description="f2", name="f2", target="min"), - Objective(description="c1", name="c1", target=(-np.inf, 225)), - Objective(description="c2", name="c2", target=(-np.inf, 0)), + Objective(description="c1", name="c1", constraint=(-np.inf, 225)), + Objective(description="c2", name="c2", constraint=(-np.inf, 0)), ], digestion=chankong_and_haimes_digestion, ) @@ -130,7 +143,7 @@ def get_agent(param): objectives=[ Objective(name="himmelblau", target="min"), Objective(name="himmelblau_transpose", target="min"), - Objective(description="Himmelblau’s function", name="himmelblau", target=(95, 
105)), + Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), ], digestion=sketchy_himmelblau_digestion, ) From ca41303ea8707fc7ba0e0b2b60721be788a3ddbc Mon Sep 17 00:00:00 2001 From: Thomas Morris Date: Thu, 21 Nov 2024 00:28:16 -0500 Subject: [PATCH 2/5] revert to 3.9 typing syntax --- src/blop/objectives.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/blop/objectives.py b/src/blop/objectives.py index f8a3dd6..1f9f10d 100644 --- a/src/blop/objectives.py +++ b/src/blop/objectives.py @@ -1,4 +1,5 @@ from collections.abc import Iterable, Sequence +from typing import Union import numpy as np import pandas as pd @@ -92,12 +93,12 @@ def __init__( name: str, description: str = "", type: str = "continuous", - target: float | str | None = None, - constraint: tuple[float, float] | set | None = None, + target: Union[float, str, None] = None, + constraint: Union[tuple[float, float], set, None] = None, transform: str = None, weight: float = 1.0, active: bool = True, - trust_domain: tuple[float, float] | None = None, + trust_domain: Union[tuple[float, float], None] = None, min_noise: float = DEFAULT_MIN_NOISE_LEVEL, max_noise: float = DEFAULT_MAX_NOISE_LEVEL, units: str = None, From 041d0945c33deca505660c87a363277821d6260a Mon Sep 17 00:00:00 2001 From: Thomas Morris Date: Thu, 21 Nov 2024 00:31:43 -0500 Subject: [PATCH 3/5] fix notebook syntax --- docs/source/tutorials/hyperparameters.ipynb | 20 ++++---- docs/source/tutorials/introduction.ipynb | 54 ++++++++++---------- docs/source/tutorials/kb-mirrors.ipynb | 20 ++++---- docs/source/tutorials/pareto-fronts.ipynb | 24 ++++----- docs/source/tutorials/passive-dofs.ipynb | 12 ++--- docs/wip/constrained-himmelblau copy.ipynb | 56 ++++++++++----------- docs/wip/custom-acquisition.ipynb | 8 +-- docs/wip/introduction.ipynb | 34 ++++++------- docs/wip/latent-toroid-dimensions.ipynb | 12 ++--- docs/wip/multi-task-sirepo.ipynb | 18 +++---- src/blop/dofs.py | 2 +- src/blop/utils/functions.py | 5 +- 12 files changed, 131 insertions(+), 134 deletions(-) diff --git a/docs/source/tutorials/hyperparameters.ipynb b/docs/source/tutorials/hyperparameters.ipynb index f58f10c..4373433 100644 --- a/docs/source/tutorials/hyperparameters.ipynb +++ b/docs/source/tutorials/hyperparameters.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Hyperparameters\n", @@ -14,7 +14,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22438de8", + "id": "1", "metadata": {}, "outputs": [], "source": [ @@ -37,7 +37,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "7a88c7bd", + "id": "2", "metadata": {}, "source": [ "The optimization goes faster if our model understands how the function changes as we change the inputs in different ways. The way it picks up on this is by starting from a general model that could describe a lot of functions, and making it specific to this one by choosing the right hyperparameters. 
Our Bayesian agent is very good at this, and only needs a few samples to figure out what the function looks like:" @@ -46,7 +46,7 @@ { "cell_type": "code", "execution_count": null, - "id": "7e9c949e", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -60,7 +60,7 @@ { "cell_type": "code", "execution_count": null, - "id": "071a829f-a390-40dc-9d5b-ae75702e119e", + "id": "4", "metadata": { "tags": [] }, @@ -97,7 +97,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "9ab3be01", + "id": "5", "metadata": {}, "source": [ "In addition to modeling the fitness of the task, the agent models the probability that an input will be feasible:" @@ -106,7 +106,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bc53bf67", + "id": "6", "metadata": { "tags": [] }, @@ -118,7 +118,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ebc65169", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -129,7 +129,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b70eaf9b", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -153,7 +153,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.9.20" }, "vscode": { "interpreter": { diff --git a/docs/source/tutorials/introduction.ipynb b/docs/source/tutorials/introduction.ipynb index 277a246..3bb1fb6 100644 --- a/docs/source/tutorials/introduction.ipynb +++ b/docs/source/tutorials/introduction.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Introduction (Himmelblau's function)\n", @@ -13,7 +13,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "c18ef717", + "id": "1", "metadata": {}, "source": [ "Let's use ``blop`` to minimize Himmelblau's function, which has four global minima:" @@ -22,7 +22,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cf27fc9e-d11c-40f4-a200-98e7814f506b", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -34,7 +34,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22438de8", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -56,7 +56,7 @@ }, { "cell_type": "markdown", - "id": "2500c410", + "id": "4", "metadata": {}, "source": [ "There are several things that our agent will need. The first ingredient is some degrees of freedom (these are always `ophyd` devices) which the agent will move around to different inputs within each DOF's bounds (the second ingredient). We define these here:" @@ -65,7 +65,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5d6df7a4", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -79,7 +79,7 @@ }, { "cell_type": "markdown", - "id": "54b6f23e", + "id": "6", "metadata": {}, "source": [ "We also need to give the agent something to do. We want our agent to look in the feedback for a variable called 'himmelblau', and try to minimize it." 
@@ -88,7 +88,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c8556bc9", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -100,7 +100,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "7a88c7bd", + "id": "8", "metadata": {}, "source": [ "In our digestion function, we define our objective as a deterministic function of the inputs:" @@ -109,7 +109,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e6bfcf73", + "id": "9", "metadata": {}, "outputs": [], "source": [ @@ -123,7 +123,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "0d3d91c3", + "id": "10", "metadata": {}, "source": [ "We then combine these ingredients into an agent, giving it an instance of ``databroker`` so that it can see the output of the plans it runs." @@ -132,7 +132,7 @@ { "cell_type": "code", "execution_count": null, - "id": "071a829f-a390-40dc-9d5b-ae75702e119e", + "id": "11", "metadata": { "tags": [] }, @@ -150,7 +150,7 @@ }, { "cell_type": "markdown", - "id": "27685849", + "id": "12", "metadata": {}, "source": [ "Without any data, we can't make any inferences about what the function looks like, and so we can't use any non-trivial acquisition functions. Let's start by quasi-randomly sampling the parameter space, and plotting our model of the function:" @@ -159,7 +159,7 @@ { "cell_type": "code", "execution_count": null, - "id": "996da937", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -169,7 +169,7 @@ }, { "cell_type": "markdown", - "id": "dc264346-10fb-4c88-9925-4bfcf0dd3b07", + "id": "14", "metadata": {}, "source": [ "To decide which points to sample, the agent needs an acquisition function. The available acquisition function are here:" @@ -178,7 +178,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fb06739b", + "id": "15", "metadata": {}, "outputs": [], "source": [ @@ -188,7 +188,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "ab608930", + "id": "16", "metadata": {}, "source": [ "Now we can start to learn intelligently. Using the shorthand acquisition functions shown above, we can see the output of a few different ones:" @@ -197,7 +197,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43b55f4f", + "id": "17", "metadata": {}, "outputs": [], "source": [ @@ -207,7 +207,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "18210f81-0e23-42b7-8589-77dc260e3131", + "id": "18", "metadata": {}, "source": [ "To decide where to go, the agent will find the inputs that maximize a given acquisition function:" @@ -216,7 +216,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b902172e-e89c-4346-89f3-bf9571cba6b3", + "id": "19", "metadata": { "tags": [] }, @@ -228,7 +228,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "9a888385-4e09-4fea-9282-cd6a6fe2c3df", + "id": "20", "metadata": {}, "source": [ "We can also ask the agent for multiple points to sample and it will jointly maximize the acquisition function over all sets of inputs, and find the most efficient route between them:" @@ -237,7 +237,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28c5c0df", + "id": "21", "metadata": { "tags": [] }, @@ -251,7 +251,7 @@ }, { "cell_type": "markdown", - "id": "23f3f7ef-c024-4ac1-9144-d0b6fb8a3944", + "id": "22", "metadata": {}, "source": [ "All of this is automated inside the ``learn`` method, which will find a point (or points) to sample, sample them, and retrain the model and its hyperparameters with the new data. 
To do 4 learning iterations of 8 points each, we can run" @@ -260,7 +260,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ff1c5f1c", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -269,7 +269,7 @@ }, { "cell_type": "markdown", - "id": "b52f3352-3b67-431c-b5af-057e02def5ba", + "id": "24", "metadata": {}, "source": [ "Our agent has found all the global minima of Himmelblau's function using Bayesian optimization, and we can ask it for the best point: " @@ -278,7 +278,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0d5cc0c8-33cf-4fb1-b91c-81828e249f6a", + "id": "25", "metadata": {}, "outputs": [], "source": [ @@ -303,7 +303,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.0" + "version": "3.9.20" }, "vscode": { "interpreter": { diff --git a/docs/source/tutorials/kb-mirrors.ipynb b/docs/source/tutorials/kb-mirrors.ipynb index 397ea78..c61a98d 100644 --- a/docs/source/tutorials/kb-mirrors.ipynb +++ b/docs/source/tutorials/kb-mirrors.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "markdown", - "id": "c1ac0e4b-d065-41af-97ec-b73bbc7dad7d", + "id": "0", "metadata": {}, "source": [ "# Hyperparameters\n", @@ -13,7 +13,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2a1798fa-b251-409e-9d2d-097240372b03", + "id": "1", "metadata": {}, "outputs": [], "source": [ @@ -26,7 +26,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0ca926ab-5859-49f3-a96a-73c401cc18e6", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -38,7 +38,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28ee6cfc-428a-4472-b503-bb072f939866", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -77,7 +77,7 @@ { "cell_type": "code", "execution_count": null, - "id": "491636be-1e04-45a2-a622-dc21e192d208", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -87,7 +87,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d6434fc9-682a-45fa-b100-465dfda3aff1", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -97,7 +97,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ef89c0cd-65be-4d1f-ab6d-c8a4ce251d7b", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -107,7 +107,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fb56db05-0fe5-46a5-8435-383f1e34f55d", + "id": "7", "metadata": {}, "outputs": [], "source": [] @@ -115,7 +115,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3.12.2 64-bit", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -129,7 +129,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.2" + "version": "3.9.20" }, "vscode": { "interpreter": { diff --git a/docs/source/tutorials/pareto-fronts.ipynb b/docs/source/tutorials/pareto-fronts.ipynb index 33793e9..ad219de 100644 --- a/docs/source/tutorials/pareto-fronts.ipynb +++ b/docs/source/tutorials/pareto-fronts.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Multiobjective optimization with Pareto front mapping\n", @@ -14,7 +14,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cac0177b-576c-4f01-b306-2e9e8544a05c", + "id": "1", "metadata": {}, "outputs": [], "source": [ @@ -26,7 +26,7 @@ { "cell_type": "code", "execution_count": null, - "id": "120812d8-5de2-4efa-8fc6-2d8e4cd0693e", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -58,8 +58,8 @@ 
"objectives = [\n", " Objective(name=\"f1\", target=\"min\"),\n", " Objective(name=\"f2\", target=\"min\"),\n", - " Objective(name=\"c1\", target=(-np.inf, 225)),\n", - " Objective(name=\"c2\", target=(-np.inf, 0)),\n", + " Objective(name=\"c1\", constraint=(-np.inf, 225)),\n", + " Objective(name=\"c2\", constraint=(-np.inf, 0)),\n", "]\n", "\n", "agent = Agent(\n", @@ -74,7 +74,7 @@ }, { "cell_type": "markdown", - "id": "d81c6af2-0a4c-4d31-9b7c-cc3065550b98", + "id": "3", "metadata": {}, "source": [ "We can plot our fitness and constraint objectives to see their models:" @@ -83,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c6c08f53-e96b-4987-ba3d-a93c84468b0b", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -92,7 +92,7 @@ }, { "cell_type": "markdown", - "id": "48b976e6-048a-4e16-9a1c-500203a2e195", + "id": "5", "metadata": {}, "source": [ "We can plot the Pareto front (the set of all Pareto-efficient points), which shows the trade-off between the two fitnesses. The points in blue comprise the Pareto front, while the points in red are either not Pareto efficient or are invalidated by one of the constraints." @@ -101,7 +101,7 @@ { "cell_type": "code", "execution_count": null, - "id": "990a877e-f533-419c-bf5d-569ad7e72c6b", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -110,7 +110,7 @@ }, { "cell_type": "markdown", - "id": "29d7f4fb-8f25-4b57-982f-28737fad2a7c", + "id": "7", "metadata": {}, "source": [ "We can explore the Pareto front by choosing a random point on the Pareto front and computing the expected improvement in the hypervolume of all fitness objectives with respect to that point (called the \"reference point\"). All this is done automatically with the `qnehvi` acquisition function:" @@ -119,7 +119,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b49e6233-b228-43a3-9d8a-722a82e93443", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -144,7 +144,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.0" + "version": "3.9.20" }, "vscode": { "interpreter": { diff --git a/docs/source/tutorials/passive-dofs.ipynb b/docs/source/tutorials/passive-dofs.ipynb index 5ff058e..f132719 100644 --- a/docs/source/tutorials/passive-dofs.ipynb +++ b/docs/source/tutorials/passive-dofs.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Passive degrees of freedom\n", @@ -13,7 +13,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "c18ef717", + "id": "1", "metadata": {}, "source": [ "Passive dofs!" 
@@ -22,7 +22,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e6bfcf73", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -34,7 +34,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4cf5dbd1-e404-4504-b822-3956ca61ef74", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -70,7 +70,7 @@ { "cell_type": "code", "execution_count": null, - "id": "990a877e-f533-419c-bf5d-569ad7e72c6b", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -94,7 +94,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.0" + "version": "3.9.20" }, "vscode": { "interpreter": { diff --git a/docs/wip/constrained-himmelblau copy.ipynb b/docs/wip/constrained-himmelblau copy.ipynb index aa0e99f..2470567 100644 --- a/docs/wip/constrained-himmelblau copy.ipynb +++ b/docs/wip/constrained-himmelblau copy.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Introduction (Himmelblau's function)\n", @@ -13,7 +13,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "c18ef717", + "id": "1", "metadata": {}, "source": [ "Let's use ``blop`` to minimize Himmelblau's function, subject to the constraint that $x_1^2 + x_2^2 < 50$. Our function looks like this:" @@ -22,7 +22,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22438de8", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -46,7 +46,7 @@ }, { "cell_type": "markdown", - "id": "2500c410", + "id": "3", "metadata": {}, "source": [ "There are several things that our agent will need. The first ingredient is some degrees of freedom (these are always `ophyd` devices) which the agent will move around to different inputs within each DOF's bounds (the second ingredient). We define these here:" @@ -55,7 +55,7 @@ { "cell_type": "code", "execution_count": null, - "id": "5d6df7a4", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -69,7 +69,7 @@ }, { "cell_type": "markdown", - "id": "54b6f23e", + "id": "5", "metadata": {}, "source": [ "We also need to give the agent something to do. We want our agent to look in the feedback for a variable called \"himmelblau\", and try to minimize it." @@ -78,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c8556bc9", + "id": "6", "metadata": {}, "outputs": [], "source": [ @@ -90,7 +90,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "7a88c7bd", + "id": "7", "metadata": {}, "source": [ "In our digestion function, we define our objective as a deterministic function of the inputs, returning a `NaN` when we violate the constraint:" @@ -99,7 +99,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e6bfcf73", + "id": "8", "metadata": {}, "outputs": [], "source": [ @@ -115,7 +115,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "0d3d91c3", + "id": "9", "metadata": {}, "source": [ "We then combine these ingredients into an agent, giving it an instance of ``databroker`` so that it can see the output of the plans it runs." 
@@ -124,7 +124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "071a829f-a390-40dc-9d5b-ae75702e119e", + "id": "10", "metadata": { "tags": [] }, @@ -146,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c4ec72a5", + "id": "11", "metadata": {}, "outputs": [], "source": [ @@ -157,7 +157,7 @@ }, { "cell_type": "markdown", - "id": "27685849", + "id": "12", "metadata": {}, "source": [ "Without any data, we can't make any inferences about what the function looks like, and so we can't use any non-trivial acquisition functions. " @@ -166,7 +166,7 @@ { "cell_type": "code", "execution_count": null, - "id": "996da937", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -177,7 +177,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fb2fa8e9", + "id": "14", "metadata": {}, "outputs": [], "source": [] @@ -185,7 +185,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e1a56c4c", + "id": "15", "metadata": {}, "outputs": [], "source": [] @@ -193,7 +193,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "9ab3be01", + "id": "16", "metadata": {}, "source": [ "In addition to modeling the fitness of the task, the agent models the probability that an input will be feasible:" @@ -202,7 +202,7 @@ { "cell_type": "code", "execution_count": null, - "id": "996c3c01-f91d-4a25-9b8d-eba5fa964504", + "id": "17", "metadata": { "tags": [] }, @@ -214,7 +214,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "ab608930", + "id": "18", "metadata": {}, "source": [ "It combines the estimate of the objective and the estimate of the feasibility in deciding where to go:" @@ -223,7 +223,7 @@ { "cell_type": "code", "execution_count": null, - "id": "28c5c0df", + "id": "19", "metadata": { "tags": [] }, @@ -235,7 +235,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50d627fe", + "id": "20", "metadata": {}, "outputs": [], "source": [ @@ -248,7 +248,7 @@ { "cell_type": "code", "execution_count": null, - "id": "43b55f4f", + "id": "21", "metadata": {}, "outputs": [], "source": [ @@ -259,7 +259,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ca6cf39f", + "id": "22", "metadata": {}, "outputs": [], "source": [ @@ -272,7 +272,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ff1c5f1c", + "id": "23", "metadata": {}, "outputs": [], "source": [ @@ -282,7 +282,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "a79b56ac", + "id": "24", "metadata": {}, "source": [ "The agent automatically tries to avoid infeasible points, but will end up naturally exploring the boundary of the constraint. 
Let's see where the agent is thinking of going:" @@ -291,7 +291,7 @@ { "cell_type": "code", "execution_count": null, - "id": "bc53bf67", + "id": "25", "metadata": { "tags": [] }, @@ -304,7 +304,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "a43130e4", + "id": "26", "metadata": {}, "source": [ "The agent will naturally explore the whole parameter space" @@ -313,7 +313,7 @@ { "cell_type": "code", "execution_count": null, - "id": "50b6582d", + "id": "27", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/wip/custom-acquisition.ipynb b/docs/wip/custom-acquisition.ipynb index dbdae7f..5275e9d 100644 --- a/docs/wip/custom-acquisition.ipynb +++ b/docs/wip/custom-acquisition.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Custom acquisition plans\n", @@ -24,7 +24,7 @@ { "cell_type": "code", "execution_count": null, - "id": "92c202e2", + "id": "1", "metadata": {}, "outputs": [], "source": [ @@ -45,7 +45,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0ba0bb16", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -70,7 +70,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "1075cb6a", + "id": "3", "metadata": {}, "source": [ "To find the inputs that lead to the tightest spectrum, we need to vary $\\mathbf{x}$, scan over $\\nu$, and then estimate the resolution for the agent to optimize over. Let's write acquisition and digestion functions to do this: " diff --git a/docs/wip/introduction.ipynb b/docs/wip/introduction.ipynb index cd124f2..89e60e9 100644 --- a/docs/wip/introduction.ipynb +++ b/docs/wip/introduction.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Bayesian optimization" @@ -12,7 +12,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "c18ef717", + "id": "1", "metadata": {}, "source": [ "This tutorial is an introduction to the syntax used by the optimizer, as well as the principles of Bayesian optimization in general.\n", @@ -23,7 +23,7 @@ { "cell_type": "code", "execution_count": null, - "id": "22438de8", + "id": "2", "metadata": {}, "outputs": [], "source": [ @@ -42,7 +42,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "ecef8da5", + "id": "3", "metadata": {}, "source": [ "There are several things that our agent will need. The first ingredient is some degrees of freedom (these are always `ophyd` devices) which the agent will move around to different inputs within each DOF's bounds (the second ingredient). We define these here:" @@ -51,7 +51,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4c870567", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -65,7 +65,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c8556bc9", + "id": "5", "metadata": {}, "outputs": [], "source": [ @@ -77,7 +77,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "7a88c7bd", + "id": "6", "metadata": {}, "source": [ "\n", @@ -87,7 +87,7 @@ { "cell_type": "code", "execution_count": null, - "id": "e6bfcf73", + "id": "7", "metadata": {}, "outputs": [], "source": [ @@ -103,7 +103,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "dad64303", + "id": "8", "metadata": {}, "source": [ "The next ingredient is a task, which gives the agent something to do. 
We want it to minimize the Styblinski-Tang function, so we make a task that will try to minimize the output of the digestion function called \"styblinski-tang\"." @@ -112,7 +112,7 @@ { "cell_type": "code", "execution_count": null, - "id": "071a829f-a390-40dc-9d5b-ae75702e119e", + "id": "9", "metadata": { "tags": [] }, @@ -137,7 +137,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "d8f2da43", + "id": "10", "metadata": {}, "source": [ "We initialized the GP with the \"quasi-random\" strategy, as it doesn't require any prior data. We can view the state of the optimizer's posterior of the tasks over the input parameters:" @@ -146,7 +146,7 @@ { "cell_type": "code", "execution_count": null, - "id": "996c3c01-f91d-4a25-9b8d-eba5fa964504", + "id": "11", "metadata": { "tags": [] }, @@ -160,7 +160,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "fa141636", + "id": "12", "metadata": {}, "source": [ "Note that the value of the fitness is the negative value of the function: we always want to maximize the fitness of the tasks.\n", @@ -173,7 +173,7 @@ { "cell_type": "code", "execution_count": null, - "id": "589a263b", + "id": "13", "metadata": {}, "outputs": [], "source": [ @@ -183,7 +183,7 @@ { "cell_type": "code", "execution_count": null, - "id": "8554d7c2", + "id": "14", "metadata": { "tags": [] }, @@ -195,7 +195,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "2529763b", + "id": "15", "metadata": {}, "source": [ "Let's tell the agent to learn a little bit more. We just have to tell it what acquisition function to use (by passing a `strategy`) and how many iterations we'd like it to perform (by passing `n_iter`)." @@ -204,7 +204,7 @@ { "cell_type": "code", "execution_count": null, - "id": "ebc65169", + "id": "16", "metadata": {}, "outputs": [], "source": [ diff --git a/docs/wip/latent-toroid-dimensions.ipynb b/docs/wip/latent-toroid-dimensions.ipynb index 7d1ab91..b0d7143 100644 --- a/docs/wip/latent-toroid-dimensions.ipynb +++ b/docs/wip/latent-toroid-dimensions.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Finding latent dimensions for the toroidal mirror \n", @@ -14,7 +14,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fa8a6989", + "id": "1", "metadata": { "tags": [] }, @@ -29,7 +29,7 @@ { "cell_type": "code", "execution_count": null, - "id": "071a829f-a390-40dc-9d5b-ae75702e119e", + "id": "2", "metadata": { "tags": [] }, @@ -59,7 +59,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "a6259a4f", + "id": "3", "metadata": {}, "source": [ "We can see that the beam is only not cut off (i.e. it has a non-zero flux) in a diagonal strip, and that in fact this is really just a one-dimensional optimization problem in some diagonal dimension. 
Our agent has figured this out, with a transformation matrix that has a long coherence length in one dimension and a short coherence length orthogonal to it:" @@ -68,7 +68,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6e17e666", + "id": "4", "metadata": {}, "outputs": [], "source": [ @@ -78,7 +78,7 @@ { "cell_type": "code", "execution_count": null, - "id": "996c3c01-f91d-4a25-9b8d-eba5fa964504", + "id": "5", "metadata": { "tags": [] }, diff --git a/docs/wip/multi-task-sirepo.ipynb b/docs/wip/multi-task-sirepo.ipynb index 62a7982..cf2b784 100644 --- a/docs/wip/multi-task-sirepo.ipynb +++ b/docs/wip/multi-task-sirepo.ipynb @@ -3,7 +3,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e7b5e13a-c059-441d-8d4f-fff080d52054", + "id": "0", "metadata": {}, "source": [ "# Multi-task optimization of KB mirrors\n", @@ -16,7 +16,7 @@ { "cell_type": "code", "execution_count": null, - "id": "fa8a6989", + "id": "1", "metadata": { "tags": [] }, @@ -31,7 +31,7 @@ { "cell_type": "code", "execution_count": null, - "id": "071a829f-a390-40dc-9d5b-ae75702e119e", + "id": "2", "metadata": { "tags": [] }, @@ -65,7 +65,7 @@ { "cell_type": "code", "execution_count": null, - "id": "2409e012", + "id": "3", "metadata": {}, "outputs": [], "source": [ @@ -75,7 +75,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "a6259a4f", + "id": "4", "metadata": {}, "source": [ "For each task, we plot the sampled data and the model's posterior with respect to two inputs to the KB mirrors. We can see that each tasks responds very differently to different motors, which is very useful to the optimizer. " @@ -84,7 +84,7 @@ { "cell_type": "code", "execution_count": null, - "id": "996c3c01-f91d-4a25-9b8d-eba5fa964504", + "id": "5", "metadata": { "tags": [] }, @@ -97,7 +97,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "296d9fd2", + "id": "6", "metadata": {}, "source": [ "We should find our optimum (or something close to it) on the very next iteration:" @@ -106,7 +106,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d6b39b54", + "id": "7", "metadata": { "tags": [] }, @@ -119,7 +119,7 @@ { "attachments": {}, "cell_type": "markdown", - "id": "e23e920c", + "id": "8", "metadata": {}, "source": [ "The agent has learned that certain dimensions affect different tasks differently!" diff --git a/src/blop/dofs.py b/src/blop/dofs.py index 95cc2fb..ddfe013 100644 --- a/src/blop/dofs.py +++ b/src/blop/dofs.py @@ -30,7 +30,7 @@ class ReadOnlyError(Exception): - ... 
+ pass def _validate_dofs(dofs): diff --git a/src/blop/utils/functions.py b/src/blop/utils/functions.py index 26b16b6..08a39bf 100644 --- a/src/blop/utils/functions.py +++ b/src/blop/utils/functions.py @@ -99,10 +99,7 @@ def ackley(*x): """ X = np.c_[x] return ( - -20 * np.exp(-0.2 * np.sqrt(0.5 * (X**2).sum(axis=1))) - - np.exp(0.5 * np.cos(2 * np.pi * X).sum(axis=1)) - + np.e - + 20 + -20 * np.exp(-0.2 * np.sqrt(0.5 * (X**2).sum(axis=1))) - np.exp(0.5 * np.cos(2 * np.pi * X).sum(axis=1)) + np.e + 20 ) From 8714c1e17bcc72784b051d60717e6cbcfd400a6c Mon Sep 17 00:00:00 2001 From: Thomas Morris Date: Thu, 21 Nov 2024 00:43:31 -0500 Subject: [PATCH 4/5] add pure constraint agents to test coverage --- src/blop/tests/conftest.py | 7 ++++--- src/blop/tests/test_agents.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/blop/tests/conftest.py b/src/blop/tests/conftest.py index 8c3c9a2..e951dfa 100644 --- a/src/blop/tests/conftest.py +++ b/src/blop/tests/conftest.py @@ -49,16 +49,17 @@ def RE(db): single_task_agents = [ "1d_1f", - # "1d_1c", "2d_1f", "2d_1f_1c", "2d_2f_2c", "3d_2r_2f_1c", ] +nonpareto_multitask_agents = ["2d_2c"] + pareto_agents = ["2d_2f_2c", "3d_2r_2f_1c"] -all_agents = [*single_task_agents, *pareto_agents] +all_agents = [*single_task_agents, *nonpareto_multitask_agents, *pareto_agents] def get_agent(param): @@ -97,7 +98,7 @@ def get_agent(param): DOF(description="The first DOF", name="x2", search_domain=(-5.0, 5.0)), ], objectives=[ - Objective(description="Himmelblau’s function", name="himmelblau", target="min"), + Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), Objective(description="Himmelblau’s function", name="himmelblau", constraint=(95, 105)), ], digestion=sketchy_himmelblau_digestion, diff --git a/src/blop/tests/test_agents.py b/src/blop/tests/test_agents.py index 598117d..b405771 100644 --- a/src/blop/tests/test_agents.py +++ b/src/blop/tests/test_agents.py @@ -10,7 +10,7 @@ def test_agent(agent, RE, db): """ agent.db = db - RE(agent.learn("qr", n=32)) + RE(agent.learn("qr", n=64)) best = agent.best assert [dof.name in best for dof in agent.dofs] From 72dc293c36df76ab94b5168a603e5e8a17630687 Mon Sep 17 00:00:00 2001 From: Thomas Morris Date: Wed, 27 Nov 2024 13:03:42 -0500 Subject: [PATCH 5/5] add docstring --- src/blop/agent.py | 2 ++ src/blop/objectives.py | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/blop/agent.py b/src/blop/agent.py index 4e95546..df04907 100644 --- a/src/blop/agent.py +++ b/src/blop/agent.py @@ -192,6 +192,8 @@ def sample(self, n: int = DEFAULT_MAX_SAMPLES, normalize: bool = False, method: How many points to sample. method : str How to sample the points. Must be one of 'quasi-random', 'random', or 'grid'. + normalize: bool + If True, sample the unit hypercube. If False, sample the parameter space of the agent. """ active_dofs = self.dofs(active=True) diff --git a/src/blop/objectives.py b/src/blop/objectives.py index 1f9f10d..5479767 100644 --- a/src/blop/objectives.py +++ b/src/blop/objectives.py @@ -334,10 +334,10 @@ def _test_obj(obj, active=None, fitness=None, constraint=None): if obj.active != active: return False if fitness is not None: - if obj.target is None: + if fitness != (obj.target is not None): return False if constraint is not None: - if obj.constraint is None: + if constraint != (obj.constraint is not None): return False return True
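
For reference, a minimal sketch of the API that this series moves to. Constraint objectives are now declared with the `constraint` keyword instead of a tuple-valued `target`, and are selected with `objectives(constraint=True)` / `objectives(fitness=True)` rather than `kind=...`. The digestion function below is a toy stand-in (loosely following the Chankong–Haimes test problem used in the Pareto tutorial), and the databroker setup is assumed to match the test fixtures; none of this is part of the patches themselves:

    import databroker
    import numpy as np

    from blop import DOF, Agent, Objective

    # Assumed to mirror the conftest fixtures: a temporary databroker catalog.
    db = databroker.Broker.named("temp")

    def digestion(df, **kwargs):
        # Toy digestion: fill the objective columns from the sampled DOF values.
        df["f1"] = (df.x1 - 2) ** 2 + (df.x2 - 1) ** 2
        df["c1"] = df.x1**2 + df.x2**2
        return df

    dofs = [
        DOF(name="x1", search_domain=(-20.0, 20.0)),
        DOF(name="x2", search_domain=(-20.0, 20.0)),
    ]

    objectives = [
        Objective(name="f1", target="min"),               # a fitness objective
        Objective(name="c1", constraint=(-np.inf, 225)),  # a constraint objective (previously target=(-np.inf, 225))
    ]

    agent = Agent(dofs=dofs, objectives=objectives, digestion=digestion, db=db)

    constraint_objs = agent.objectives(constraint=True)  # replaces objectives(kind="constraint")
    fitness_objs = agent.objectives(fitness=True)        # replaces objectives(kind="fitness")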