diff --git a/.tools/envs/testenv-linux.yml b/.tools/envs/testenv-linux.yml index 39cfd0b69..e92c81552 100644 --- a/.tools/envs/testenv-linux.yml +++ b/.tools/envs/testenv-linux.yml @@ -28,6 +28,7 @@ dependencies: - jinja2 # dev, tests - annotated-types # dev, tests - iminuit # dev, tests + - pygad # dev, tests - pip: # dev, tests, docs - nevergrad # dev, tests - DFO-LS>=1.5.3 # dev, tests diff --git a/.tools/envs/testenv-numpy.yml b/.tools/envs/testenv-numpy.yml index 015dd9b52..00d70f973 100644 --- a/.tools/envs/testenv-numpy.yml +++ b/.tools/envs/testenv-numpy.yml @@ -26,6 +26,7 @@ dependencies: - jinja2 # dev, tests - annotated-types # dev, tests - iminuit # dev, tests + - pygad # dev, tests - pip: # dev, tests, docs - nevergrad # dev, tests - DFO-LS>=1.5.3 # dev, tests diff --git a/.tools/envs/testenv-others.yml b/.tools/envs/testenv-others.yml index e300065fe..3886725b9 100644 --- a/.tools/envs/testenv-others.yml +++ b/.tools/envs/testenv-others.yml @@ -26,6 +26,7 @@ dependencies: - jinja2 # dev, tests - annotated-types # dev, tests - iminuit # dev, tests + - pygad # dev, tests - pip: # dev, tests, docs - nevergrad # dev, tests - DFO-LS>=1.5.3 # dev, tests diff --git a/.tools/envs/testenv-pandas.yml b/.tools/envs/testenv-pandas.yml index 9f8fc6d7d..e20a085d9 100644 --- a/.tools/envs/testenv-pandas.yml +++ b/.tools/envs/testenv-pandas.yml @@ -26,6 +26,7 @@ dependencies: - jinja2 # dev, tests - annotated-types # dev, tests - iminuit # dev, tests + - pygad # dev, tests - pip: # dev, tests, docs - nevergrad # dev, tests - DFO-LS>=1.5.3 # dev, tests diff --git a/.tools/envs/testenv-plotly.yml b/.tools/envs/testenv-plotly.yml index 27504174b..6f7f47001 100644 --- a/.tools/envs/testenv-plotly.yml +++ b/.tools/envs/testenv-plotly.yml @@ -26,6 +26,7 @@ dependencies: - jinja2 # dev, tests - annotated-types # dev, tests - iminuit # dev, tests + - pygad # dev, tests - pip: # dev, tests, docs - nevergrad # dev, tests - DFO-LS>=1.5.3 # dev, tests diff --git 
a/docs/source/algorithms.md b/docs/source/algorithms.md index d9e43a004..b1b2c5371 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -4043,6 +4043,63 @@ these optimizers, you need to have initialization for speed. Default is False. ``` +## PyGAD Optimizer + +optimagic supports the [PyGAD](https://github.com/ahmedfgad/GeneticAlgorithmPython) +genetic algorithm optimizer. To use PyGAD, you need to have +[the pygad package](https://github.com/ahmedfgad/GeneticAlgorithmPython) installed +(`pip install pygad`). + +```{eval-rst} +.. dropdown:: pygad + + .. code-block:: + + "pygad" + + Minimize a scalar function using the PyGAD genetic algorithm. + + PyGAD is a Python library for building genetic algorithms and training machine learning algorithms. + Genetic algorithms are metaheuristics inspired by the process of natural selection that belong to + the larger class of evolutionary algorithms. These algorithms apply biologically inspired + operators such as mutation, crossover, and selection to optimization problems. + + The algorithm maintains a population of candidate solutions and iteratively improves them + through genetic operations, making it ideal for global optimization problems with complex + search spaces that may contain multiple local optima. + + The algorithm supports the following options: + + - **population_size** (int): Number of solutions in each generation. Default is None. + - **num_parents_mating** (int): Number of parents selected for mating in each generation. If None, defaults to max(2, population_size // 2). Default is 10. + - **num_generations** (int): Number of generations. Default is 50. + - **initial_population** (array-like): Initial population is a 2D array where + each row represents a solution and each column represents a parameter (gene) value. + The number of rows must equal population_size, and the number of columns must + match the length of the initial parameters (x0). 
+ When None, the population is randomly generated within the parameter bounds using + the specified population_size and the dimensionality from x0. + - **parent_selection_type** (str or callable): Method for selecting parents. Can be a string ("sss", "rws", "sus", "rank", "random", "tournament") or a custom function with signature ``parent_selection_func(fitness, num_parents, ga_instance) -> tuple[NDArray, NDArray]``. Default is "sss". + - **keep_parents** (int): Number of best parents to keep in the next generation. Only has effect when keep_elitism is 0. Default is -1. + - **keep_elitism** (int): Number of best solutions to preserve across generations. If non-zero, keep_parents has no effect. Default is 1. + - **K_tournament** (int): Tournament size for tournament selection. Only used when parent_selection_type is "tournament". Default is 3. + - **crossover_type** (str, callable, or None): Crossover method. Can be a string ("single_point", "two_points", "uniform", "scattered"), a custom function with signature ``crossover_func(parents, offspring_size, ga_instance) -> NDArray``, or None to disable crossover. Default is "single_point". + - **crossover_probability** (float): Probability of applying crossover. Range [0, 1]. Default is None. + - **mutation_type** (str, callable, or None): Mutation method. Can be a string ("random", "swap", "inversion", "scramble", "adaptive"), a custom function with signature ``mutation_func(offspring, ga_instance) -> NDArray``, or None to disable mutation. Default is "random". + - **mutation_probability** (float/list/tuple/array): Probability of mutation. Range [0, 1]. If specified, mutation_percent_genes and mutation_num_genes are ignored. Default is None. + - **mutation_percent_genes** (float/str/list/tuple/array): Percentage of genes to mutate. Default is "default" (equivalent to 10%). Ignored if mutation_probability or mutation_num_genes are specified. 
+ - **mutation_num_genes** (int/list/tuple/array): Exact number of genes to mutate. Ignored if mutation_probability is specified. Default is None. + - **mutation_by_replacement** (bool): Whether to replace gene values during mutation. Only works with mutation_type="random". Default is False. + - **random_mutation_min_val** (float/list/array): Minimum value for random mutation. Only used with mutation_type="random". Default is -1.0. + - **random_mutation_max_val** (float/list/array): Maximum value for random mutation. Only used with mutation_type="random". Default is 1.0. + - **allow_duplicate_genes** (bool): Whether to allow duplicate gene values within a solution. Default is True. + - **fitness_batch_size** (int): Number of solutions to evaluate in parallel batches. When None and n_cores > 1, automatically set to n_cores for optimal parallelization. Default is None. + - **stop_criteria** (str/list): Early stopping criteria. Format: "reach_{value}" (e.g. "reach_0.5", stop once the best fitness reaches the value) or "saturate_{N}" (e.g. "saturate_10", stop after N generations without improvement). Default is None. + - **n_cores** (int): Number of cores for parallel fitness evaluation. Default is 1. + - **random_seed** (int): Random seed for reproducibility. Default is None. 
+ +``` + ## References ```{eval-rst} diff --git a/environment.yml b/environment.yml index 0764ef7af..44851e575 100644 --- a/environment.yml +++ b/environment.yml @@ -38,6 +38,7 @@ dependencies: - furo # dev, docs - annotated-types # dev, tests - iminuit # dev, tests + - pygad # dev, tests - pip: # dev, tests, docs - nevergrad # dev, tests - DFO-LS>=1.5.3 # dev, tests diff --git a/pyproject.toml b/pyproject.toml index 133b181cf..74b67e460 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -381,5 +381,6 @@ module = [ "pdbp", "iminuit", "nevergrad", + "pygad", ] ignore_missing_imports = true diff --git a/src/optimagic/algorithms.py b/src/optimagic/algorithms.py index 588514e95..69a1f2745 100644 --- a/src/optimagic/algorithms.py +++ b/src/optimagic/algorithms.py @@ -38,6 +38,7 @@ NloptVAR, ) from optimagic.optimizers.pounders import Pounders +from optimagic.optimizers.pygad_optimizer import Pygad from optimagic.optimizers.pygmo_optimizers import ( PygmoBeeColony, PygmoCmaes, @@ -173,6 +174,7 @@ def Scalar( @dataclass(frozen=True) class BoundedGlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -371,6 +373,7 @@ class BoundedGlobalGradientFreeScalarAlgorithms(AlgoSelection): nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -407,6 +410,7 @@ def Parallel(self) -> BoundedGlobalGradientFreeParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad 
pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -463,6 +467,7 @@ def Scalar(self) -> GlobalGradientFreeNonlinearConstrainedParallelScalarAlgorith @dataclass(frozen=True) class GlobalGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -611,6 +616,7 @@ def Scalar(self) -> BoundedGradientFreeNonlinearConstrainedParallelScalarAlgorit @dataclass(frozen=True) class BoundedGradientFreeParallelScalarAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -706,6 +712,7 @@ def Scalar(self) -> BoundedGlobalNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1037,6 +1044,7 @@ class BoundedGlobalGradientFreeAlgorithms(AlgoSelection): nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -1101,6 +1109,7 @@ class GlobalGradientFreeScalarAlgorithms(AlgoSelection): nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] 
= PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -1141,6 +1150,7 @@ def Parallel(self) -> GlobalGradientFreeParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1316,6 +1326,7 @@ class BoundedGradientFreeScalarAlgorithms(AlgoSelection): nlopt_newuoa: Type[NloptNEWUOA] = NloptNEWUOA nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_sbplx: Type[NloptSbplx] = NloptSbplx + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -1380,6 +1391,7 @@ def Parallel(self) -> BoundedGradientFreeLeastSquaresParallelAlgorithms: class BoundedGradientFreeParallelAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO pounders: Type[Pounders] = Pounders + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1461,6 +1473,7 @@ def Scalar(self) -> GradientFreeNonlinearConstrainedParallelScalarAlgorithms: class GradientFreeParallelScalarAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1534,6 +1547,7 @@ class BoundedGlobalScalarAlgorithms(AlgoSelection): nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES + pygad: Type[Pygad] = Pygad 
pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -1579,6 +1593,7 @@ def Parallel(self) -> BoundedGlobalParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedGlobalParallelAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1648,6 +1663,7 @@ def Scalar(self) -> GlobalNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalParallelScalarAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -1883,6 +1899,7 @@ def Scalar(self) -> BoundedNonlinearConstrainedParallelScalarAlgorithms: @dataclass(frozen=True) class BoundedParallelScalarAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2146,6 +2163,7 @@ class GlobalGradientFreeAlgorithms(AlgoSelection): nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -2240,6 +2258,7 @@ class BoundedGradientFreeAlgorithms(AlgoSelection): nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_sbplx: Type[NloptSbplx] = NloptSbplx pounders: Type[Pounders] = Pounders + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony 
pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -2337,6 +2356,7 @@ class GradientFreeScalarAlgorithms(AlgoSelection): nlopt_neldermead: Type[NloptNelderMead] = NloptNelderMead nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS nlopt_sbplx: Type[NloptSbplx] = NloptSbplx + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -2409,6 +2429,7 @@ class GradientFreeParallelAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_pso: Type[NevergradPSO] = NevergradPSO pounders: Type[Pounders] = Pounders + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2452,6 +2473,7 @@ class BoundedGlobalAlgorithms(AlgoSelection): nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -2534,6 +2556,7 @@ class GlobalScalarAlgorithms(AlgoSelection): nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -2583,6 +2606,7 @@ def Parallel(self) -> GlobalParallelScalarAlgorithms: @dataclass(frozen=True) class GlobalParallelAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = 
PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -2863,6 +2887,7 @@ class BoundedScalarAlgorithms(AlgoSelection): nlopt_sbplx: Type[NloptSbplx] = NloptSbplx nlopt_tnewton: Type[NloptTNewton] = NloptTNewton nlopt_var: Type[NloptVAR] = NloptVAR + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -2950,6 +2975,7 @@ def Parallel(self) -> BoundedLeastSquaresParallelAlgorithms: class BoundedParallelAlgorithms(AlgoSelection): nevergrad_pso: Type[NevergradPSO] = NevergradPSO pounders: Type[Pounders] = Pounders + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3051,6 +3077,7 @@ def Scalar(self) -> NonlinearConstrainedParallelScalarAlgorithms: class ParallelScalarAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_pso: Type[NevergradPSO] = NevergradPSO + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3170,6 +3197,7 @@ class GradientFreeAlgorithms(AlgoSelection): nlopt_praxis: Type[NloptPRAXIS] = NloptPRAXIS nlopt_sbplx: Type[NloptSbplx] = NloptSbplx pounders: Type[Pounders] = Pounders + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -3234,6 +3262,7 @@ class GlobalAlgorithms(AlgoSelection): nlopt_direct: Type[NloptDirect] = NloptDirect nlopt_esch: Type[NloptESCH] = NloptESCH nlopt_isres: Type[NloptISRES] = NloptISRES + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = 
PygmoCompassSearch @@ -3380,6 +3409,7 @@ class BoundedAlgorithms(AlgoSelection): nlopt_tnewton: Type[NloptTNewton] = NloptTNewton nlopt_var: Type[NloptVAR] = NloptVAR pounders: Type[Pounders] = Pounders + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -3517,6 +3547,7 @@ class ScalarAlgorithms(AlgoSelection): nlopt_sbplx: Type[NloptSbplx] = NloptSbplx nlopt_tnewton: Type[NloptTNewton] = NloptTNewton nlopt_var: Type[NloptVAR] = NloptVAR + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch @@ -3631,6 +3662,7 @@ class ParallelAlgorithms(AlgoSelection): neldermead_parallel: Type[NelderMeadParallel] = NelderMeadParallel nevergrad_pso: Type[NevergradPSO] = NevergradPSO pounders: Type[Pounders] = Pounders + pygad: Type[Pygad] = Pygad pygmo_gaco: Type[PygmoGaco] = PygmoGaco pygmo_pso_gen: Type[PygmoPsoGen] = PygmoPsoGen scipy_brute: Type[ScipyBrute] = ScipyBrute @@ -3696,6 +3728,7 @@ class Algorithms(AlgoSelection): nlopt_tnewton: Type[NloptTNewton] = NloptTNewton nlopt_var: Type[NloptVAR] = NloptVAR pounders: Type[Pounders] = Pounders + pygad: Type[Pygad] = Pygad pygmo_bee_colony: Type[PygmoBeeColony] = PygmoBeeColony pygmo_cmaes: Type[PygmoCmaes] = PygmoCmaes pygmo_compass_search: Type[PygmoCompassSearch] = PygmoCompassSearch diff --git a/src/optimagic/config.py b/src/optimagic/config.py index 643a6f663..13ce4fee9 100644 --- a/src/optimagic/config.py +++ b/src/optimagic/config.py @@ -108,6 +108,14 @@ IS_NEVERGRAD_INSTALLED = True +try: + import pygad # noqa: F401 +except ImportError: + IS_PYGAD_INSTALLED = False +else: + IS_PYGAD_INSTALLED = True + + # ====================================================================================== # Check if pandas version is newer or equal to 
version 2.1.0 # ====================================================================================== diff --git a/src/optimagic/optimizers/pygad_optimizer.py b/src/optimagic/optimizers/pygad_optimizer.py new file mode 100644 index 000000000..030508221 --- /dev/null +++ b/src/optimagic/optimizers/pygad_optimizer.py @@ -0,0 +1,284 @@ +import warnings +from dataclasses import dataclass +from typing import Any, Literal + +import numpy as np +from numpy.typing import NDArray + +from optimagic import mark +from optimagic.config import IS_PYGAD_INSTALLED +from optimagic.exceptions import NotInstalledError +from optimagic.optimization.algo_options import get_population_size +from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult +from optimagic.optimization.internal_optimization_problem import ( + InternalOptimizationProblem, +) +from optimagic.typing import ( + AggregationLevel, + CrossoverFunction, + MutationFunction, + NonNegativeFloat, + ParentSelectionFunction, + PositiveFloat, + PositiveInt, +) + +if IS_PYGAD_INSTALLED: + import pygad + + +@mark.minimizer( + name="pygad", + solver_type=AggregationLevel.SCALAR, + is_available=IS_PYGAD_INSTALLED, + is_global=True, + needs_jac=False, + needs_hess=False, + supports_parallelism=True, + supports_bounds=True, + supports_linear_constraints=False, + supports_nonlinear_constraints=False, + disable_history=False, +) +@dataclass(frozen=True) +class Pygad(Algorithm): + population_size: PositiveInt | None = None + num_parents_mating: PositiveInt | None = 10 + num_generations: PositiveInt | None = 50 + + initial_population: NDArray[np.float64] | list[list[float]] | None = None + + parent_selection_type: ( + Literal["sss", "rws", "sus", "rank", "random", "tournament"] + | ParentSelectionFunction + ) = "sss" + keep_parents: int = -1 + keep_elitism: PositiveInt = 1 + K_tournament: PositiveInt = 3 + + crossover_type: ( + Literal["single_point", "two_points", "uniform", "scattered"] + | CrossoverFunction + | 
None + ) = "single_point" + crossover_probability: NonNegativeFloat | None = None + + mutation_type: ( + Literal["random", "swap", "inversion", "scramble", "adaptive"] + | MutationFunction + | None + ) = "random" + mutation_probability: ( + NonNegativeFloat + | list[NonNegativeFloat] + | tuple[NonNegativeFloat, NonNegativeFloat] + | NDArray[np.float64] + | None + ) = None + mutation_percent_genes: ( + PositiveFloat + | str + | list[PositiveFloat] + | tuple[PositiveFloat, PositiveFloat] + | NDArray[np.float64] + ) = "default" + mutation_num_genes: ( + PositiveInt + | list[PositiveInt] + | tuple[PositiveInt, PositiveInt] + | NDArray[np.int_] + | None + ) = None + mutation_by_replacement: bool = False + random_mutation_min_val: float | list[float] | NDArray[np.float64] = -1.0 + random_mutation_max_val: float | list[float] | NDArray[np.float64] = 1.0 + + allow_duplicate_genes: bool = True + + fitness_batch_size: PositiveInt | None = None + stop_criteria: str | list[str] | None = None + + n_cores: PositiveInt = 1 + random_seed: int | None = None + + def _solve_internal_problem( + self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] + ) -> InternalOptimizeResult: + if not IS_PYGAD_INSTALLED: + raise NotInstalledError( + "The 'pygad' algorithm requires the pygad package to be " + "installed. You can install it with 'pip install pygad'." 
+ ) + + if ( + problem.bounds.lower is None + or problem.bounds.upper is None + or not np.isfinite(problem.bounds.lower).all() + or not np.isfinite(problem.bounds.upper).all() + ): + raise ValueError("pygad requires finite bounds for all parameters.") + + # Determine effective fitness_batch_size for parallel processing + effective_fitness_batch_size = determine_effective_batch_size( + self.fitness_batch_size, self.n_cores + ) + + if ( + effective_fitness_batch_size is not None + and effective_fitness_batch_size > 1 + and self.n_cores > 1 + ): + + def _fitness_func_batch( + _ga_instance: Any, + batch_solutions: NDArray[np.float64], + _batch_indices: list[int] | NDArray[np.int_], + ) -> list[float]: + solutions_list: list[NDArray[np.float64]] = [ + np.asarray(batch_solutions[i]) + for i in range(batch_solutions.shape[0]) + ] + batch_results = problem.batch_fun(solutions_list, n_cores=self.n_cores) + + return [-float(result) for result in batch_results] + + fitness_function: Any = _fitness_func_batch + else: + + def _fitness_func_single( + _ga_instance: Any, solution: NDArray[np.float64], _solution_idx: int + ) -> float: + return -float(problem.fun(solution)) + + fitness_function = _fitness_func_single + + population_size = get_population_size( + population_size=self.population_size, x=x0, lower_bound=10 + ) + + num_parents_mating = ( + self.num_parents_mating + if self.num_parents_mating is not None + else max(2, population_size // 2) + ) + + if self.initial_population is not None: + initial_population = np.array(self.initial_population) + else: + num_genes = len(x0) + + initial_population = np.random.uniform( + problem.bounds.lower, + problem.bounds.upper, + size=(population_size, num_genes), + ) + + initial_population[0] = x0 + + gene_space = [ + {"low": problem.bounds.lower[i], "high": problem.bounds.upper[i]} + for i in range(len(x0)) + ] + + ga_instance = pygad.GA( + num_generations=self.num_generations, + num_parents_mating=num_parents_mating, + 
fitness_func=fitness_function, + fitness_batch_size=effective_fitness_batch_size, + initial_population=initial_population, + gene_space=gene_space, + parent_selection_type=self.parent_selection_type, + keep_parents=self.keep_parents, + keep_elitism=self.keep_elitism, + K_tournament=self.K_tournament, + crossover_type=self.crossover_type, + crossover_probability=self.crossover_probability, + mutation_type=self.mutation_type, + mutation_probability=self.mutation_probability, + mutation_by_replacement=self.mutation_by_replacement, + mutation_percent_genes=self.mutation_percent_genes, + mutation_num_genes=self.mutation_num_genes, + random_mutation_min_val=self.random_mutation_min_val, + random_mutation_max_val=self.random_mutation_max_val, + allow_duplicate_genes=self.allow_duplicate_genes, + stop_criteria=self.stop_criteria, + parallel_processing=None, + random_seed=self.random_seed, + ) + + ga_instance.run() + + res = _process_pygad_result(ga_instance) + + return res + + +def determine_effective_batch_size( + fitness_batch_size: int | None, n_cores: int +) -> int | None: + """Determine the effective fitness_batch_size for parallel processing. + + Behavior: + - If `fitness_batch_size` is explicitly provided: + - The value is returned unchanged. + - A warning is issued if it is less than `n_cores`, as this may + underutilize available cores. + - If `fitness_batch_size` is `None`: + - If `n_cores` > 1, defaults to `n_cores`. + - Otherwise, returns None (i.e., single-threaded evaluation). + Args: + fitness_batch_size: User-specified batch size or None + n_cores: Number of cores for parallel processing + + Returns: + Effective batch size for PyGAD, or None for single-threaded processing + + """ + if fitness_batch_size is not None: + if fitness_batch_size < n_cores: + warnings.warn( + f"fitness_batch_size ({fitness_batch_size}) is smaller than " + f"n_cores ({n_cores}). This may reduce parallel efficiency. " + f"Consider setting fitness_batch_size >= n_cores." 
+ ) + return fitness_batch_size + elif n_cores > 1: + return n_cores + else: + return None + + +def _process_pygad_result(ga_instance: Any) -> InternalOptimizeResult: + """Process PyGAD result into InternalOptimizeResult. + + Args: + ga_instance: The PyGAD instance after running the optimization + + Returns: + InternalOptimizeResult: Processed optimization results + + """ + best_solution, best_fitness, _ = ga_instance.best_solution() + + best_criterion = -best_fitness + + completed_generations = ga_instance.generations_completed + success = ga_instance.run_completed + if success: + message = ( + "Optimization terminated successfully.\n" + f"Generations completed: {completed_generations}" + ) + else: + message = ( + "Optimization failed to complete.\n" + f"Generations completed: {completed_generations}" + ) + + return InternalOptimizeResult( + x=best_solution, + fun=best_criterion, + success=success, + message=message, + n_fun_evals=ga_instance.generations_completed * ga_instance.pop_size[0], + ) diff --git a/src/optimagic/typing.py b/src/optimagic/typing.py index 2b400ecd9..2a20f65af 100644 --- a/src/optimagic/typing.py +++ b/src/optimagic/typing.py @@ -156,3 +156,62 @@ class MultiStartIterationHistory(TupleLikeAccess): history: IterationHistory local_histories: list[IterationHistory] | None = None exploration: IterationHistory | None = None + + +class ParentSelectionFunction(Protocol): + """Protocol for user-defined parent selection functions. + + Args: + fitness: Array of fitness values for all solutions in the population. + num_parents: Number of parents to select. + ga_instance: The PyGAD GA instance. + + Returns: + Tuple of (selected_parents, parent_indices) where: + - selected_parents: 2D array of selected parent solutions + - parent_indices: 1D array of indices of selected parents + + """ + + def __call__( + self, fitness: NDArray[np.float64], num_parents: int, ga_instance: Any + ) -> tuple[NDArray[np.float64], NDArray[np.int_]]: ... 
+ + +class CrossoverFunction(Protocol): + """Protocol for user-defined crossover functions. + + Args: + parents: 2D array of parent solutions selected for mating. + offspring_size: Tuple (num_offspring, num_genes) specifying + offspring size. + ga_instance: The PyGAD GA instance. + + Returns: + 2D array of offspring solutions. + + """ + + def __call__( + self, + parents: NDArray[np.float64], + offspring_size: tuple[int, int], + ga_instance: Any, + ) -> NDArray[np.float64]: ... + + +class MutationFunction(Protocol): + """Protocol for user-defined mutation functions. + + Args: + offspring: 2D array of offspring solutions to be mutated. + ga_instance: The PyGAD GA instance. + + Returns: + 2D array of mutated offspring solutions. + + """ + + def __call__( + self, offspring: NDArray[np.float64], ga_instance: Any + ) -> NDArray[np.float64]: ...