diff --git a/.gitignore b/.gitignore index 5e1d077ec..5c92ba19e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +# AI +CLAUDE.md + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/docs/rtd_environment.yml b/docs/rtd_environment.yml index efa1ebac6..c7f941372 100644 --- a/docs/rtd_environment.yml +++ b/docs/rtd_environment.yml @@ -4,16 +4,15 @@ channels: - conda-forge - nodefaults dependencies: - - python=3.11 + - python=3.12 - typing-extensions - pip - setuptools_scm - toml - - sphinx + - sphinx>=8.2.3 - sphinxcontrib-bibtex - sphinx-copybutton - sphinx-design - - sphinx-panels - ipython - ipython_genutils - myst-nb @@ -31,7 +30,7 @@ dependencies: - annotated-types - pygmo>=2.19.0 - pip: - - ../ + - -e ../ - Py-BOBYQA - DFO-LS - pandas-stubs # dev, tests diff --git a/docs/source/algorithms.md b/docs/source/algorithms.md index f72b3d460..bd8837b9a 100644 --- a/docs/source/algorithms.md +++ b/docs/source/algorithms.md @@ -3,62 +3,42 @@ # Optimizers Check out {ref}`how-to-select-algorithms` to see how to select an algorithm and specify -`algo_options` when using `maximize` or `minimize`. +`algo_options` when using `maximize` or `minimize`. The default algorithm options are +discussed in {ref}`algo_options` and their type hints are documented in {ref}`typing`. -## Optimizers from scipy +## Optimizers from SciPy (scipy-algorithms)= -optimagic supports most `scipy` algorithms and scipy is automatically installed when you -install optimagic. +optimagic supports most [SciPy](https://scipy.org/) algorithms and SciPy is +automatically installed when you install optimagic. ```{eval-rst} .. dropdown:: scipy_lbfgsb - .. code-block:: - - "scipy_lbfgsb" - - Minimize a scalar function of one or more variables using the L-BFGS-B algorithm. - - The optimizer is taken from scipy, which calls the Fortran code written by the - original authors of the algorithm. 
The Fortran code includes the corrections - and improvements that were introduced in a follow up paper. - - lbfgsb is a limited memory version of the original bfgs algorithm, that deals with - lower and upper bounds via an active set approach. + **How to use this algorithm:** - The lbfgsb algorithm is well suited for differentiable scalar optimization problems - with up to several hundred parameters. - - It is a quasi-newton line search algorithm. At each trial point it evaluates the - criterion function and its gradient to find a search direction. It then approximates - the hessian using the stored history of gradients and uses the hessian to calculate - a candidate step size. Then it uses a gradient based line search algorithm to - determine the actual step length. Since the algorithm always evaluates the gradient - and criterion function jointly, the user should provide a - ``criterion_and_derivative`` function that exploits the synergies in the - calculation of criterion and gradient. - - The lbfgsb algorithm is almost perfectly scale invariant. Thus, it is not necessary - to scale the parameters. - - - **convergence.ftol_rel** (float): Stop when the relative improvement - between two iterations is smaller than this. More formally, this is expressed as + .. code-block:: - .. math:: + import optimagic as om + om.minimize( + ..., + algorithm=om.algos.scipy_lbfgsb(stopping_maxiter=1_000, ...) + ) + + or + + .. code-block:: - \frac{(f^k - f^{k+1})}{\\max{{|f^k|, |f^{k+1}|, 1}}} \leq - \text{relative_criterion_tolerance} + om.minimize( + ..., + algorithm="scipy_lbfgsb", + algo_options={"stopping_maxiter": 1_000, ...} + ) + **Description and available options:** - - **convergence.gtol_abs** (float): Stop if all elements of the projected - gradient are smaller than this. - - **stopping.maxfun** (int): If the maximum number of function - evaluation is reached, the optimization stops but we do not count this as convergence. 
- - **stopping.maxiter** (int): If the maximum number of iterations is reached, - the optimization stops, but we do not count this as convergence. - - **limited_memory_storage_length** (int): Maximum number of saved gradients used to approximate the hessian matrix. + .. autoclass:: optimagic.optimizers.scipy_optimizers.ScipyLBFGSB ``` diff --git a/docs/source/conf.py b/docs/source/conf.py index efe33eef9..f10cbde15 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -48,7 +48,6 @@ "sphinx_copybutton", "myst_nb", "sphinxcontrib.bibtex", - "sphinx_panels", "sphinx_design", "sphinxcontrib.mermaid", ] @@ -67,6 +66,28 @@ bibtex_bibfiles = ["refs.bib"] autodoc_member_order = "bysource" +autodoc_class_signature = "separated" +autodoc_default_options = { + "exclude-members": "__init__", + "members": True, + "undoc-members": True, + "member-order": "bysource", + "class-doc-from": "class", +} +autodoc_preserve_defaults = True +autodoc_type_aliases = { + "PositiveInt": "optimagic.typing.PositiveInt", + "NonNegativeInt": "optimagic.typing.NonNegativeInt", + "PositiveFloat": "optimagic.typing.PositiveFloat", + "NonNegativeFloat": "optimagic.typing.NonNegativeFloat", + "NegativeFloat": "optimagic.typing.NegativeFloat", + "GtOneFloat": "optimagic.typing.GtOneFloat", + "UnitIntervalFloat": "optimagic.typing.UnitIntervalFloat", + "YesNoBool": "optimagic.typing.YesNoBool", + "DirectionLiteral": "optimagic.typing.DirectionLiteral", + "BatchEvaluatorLiteral": "optimagic.typing.BatchEvaluatorLiteral", + "ErrorHandlingLiteral": "optimagic.typing.ErrorHandlingLiteral", +} autodoc_mock_imports = [ "bokeh", @@ -86,8 +107,8 @@ ] extlinks = { - "ghuser": ("https://github.com/%s", "@"), - "gh": ("https://github.com/optimagic-dev/optimagic/pulls/%s", "#"), + "ghuser": ("https://github.com/%s", "@%s"), + "gh": ("https://github.com/optimagic-dev/optimagic/pull/%s", "#%s"), } intersphinx_mapping = get_intersphinx_mapping( @@ -126,7 +147,7 @@ # # This is also used if you do content 
translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = "en" # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -145,7 +166,7 @@ todo_emit_warnings = True # -- Options for myst-nb ---------------------------------------- -nb_execution_mode = "force" +nb_execution_mode = "force" # "off", "force", "cache", "auto" nb_execution_allow_errors = False nb_merge_streams = True diff --git a/docs/source/explanation/internal_optimizers.md b/docs/source/explanation/internal_optimizers.md index b1ed3523c..6ff5d17a1 100644 --- a/docs/source/explanation/internal_optimizers.md +++ b/docs/source/explanation/internal_optimizers.md @@ -98,7 +98,9 @@ To make switching between different algorithm as simple as possible, we align th of commonly used convergence and stopping criteria. We also align the default values for stopping and convergence criteria as much as possible. -You can find the harmonized names and value [here](algo_options_docs). +```{eval-rst} +You can find the harmonized names and values here: :ref:`algo_options`. +``` To align the names of other tuning parameters as much as possible with what is already there, simple have a look at the optimizers we already wrapped. For example, if you are diff --git a/docs/source/how_to/how_to_document_optimizers.md b/docs/source/how_to/how_to_document_optimizers.md new file mode 100644 index 000000000..a7481f0e5 --- /dev/null +++ b/docs/source/how_to/how_to_document_optimizers.md @@ -0,0 +1,254 @@ +# How to document optimizers + +This guide shows you how to document algorithms in optimagic using our new documentation +system. We'll walk through the process step-by-step using the `ScipyLBFGSB` optimizer as +a complete example. 
+ +## When to Use This Guide + +Use this guide when you need to: + +- Document a new algorithm you've added to optimagic +- Migrate existing algorithm documentation from the old split system (docstrings + + `algorithms.md`) to the new system +- Update or improve existing algorithm documentation + +If you're adding a completely new optimizer to optimagic, start with the "How to Add +Optimizers" guide first, then use this guide to document your algorithm properly. + +## Why the New Documentation System? + +Previously, algorithm documentation was scattered across multiple places: + +- Basic descriptions in the algorithm class docstrings +- Detailed parameter descriptions in `algorithms.md` +- Usage examples separate from the algorithm definitions + +This made it hard to maintain consistency and keep documentation up-to-date. The new +system centralizes nearly all documentation in the algorithm code itself, making it: + +- Easier to maintain (documentation lives next to code) +- More consistent (unified format across all algorithms) +- Auto-generated (parameter lists appear automatically in docs) +- Type-safe (documentation matches actual parameter types) + +## The Documentation System Components + +Our documentation system has three main parts: + +1. **Algorithm Class Documentation**: A comprehensive docstring in the algorithm + dataclass that explains what the algorithm does, how it works, and when to use it +1. **Parameter Documentation**: Detailed docstrings for each parameter with mathematical + formulations when needed +1. **Usage Integration**: A section in `algorithms.md` that shows how to use the + algorithm + +Let's walk through documenting an algorithm from start to finish. + +## Example: Documenting ScipyLBFGSB + +We'll use the `ScipyLBFGSB` optimizer to show you exactly how to document an algorithm. +This is a real example from the optimagic codebase, so you can follow along and see the +results. 
+ +### Step 1: Understand Your Algorithm + +Before writing documentation, make sure you understand: + +- What the algorithm does mathematically +- What problems it's designed to solve +- How its parameters affect behavior +- Any performance characteristics or limitations + +For L-BFGS-B, this means understanding it's a quasi-Newton method for bound-constrained +optimization that approximates the Hessian using gradient history. + +```{eval-rst} + +.. note:: + If you are simply migrating an existing algorithm, you can mostly rely on the + existing documentation in the algorithm class docstring and `algorithms.md`. + +``` + +### Step 2: Write the Algorithm Class Documentation + +The algorithm class docstring is the most important part. It should give users +everything they need to decide whether to use this algorithm. + +Here's how we document `ScipyLBFGSB`: + +```python +# src/optimagic/optimizers/scipy_optimizers.py +class ScipyLBFGSB(Algorithm): + """Minimize a scalar differentiable function using the L-BFGS-B algorithm. + + The optimizer is taken from scipy, which calls the Fortran code written by the + original authors of the algorithm. The Fortran code includes the corrections + and improvements that were introduced in a follow up paper. + + lbfgsb is a limited memory version of the original bfgs algorithm, that deals with + lower and upper bounds via an active set approach. + + The lbfgsb algorithm is well suited for differentiable scalar optimization problems + with up to several hundred parameters. + + It is a quasi-newton line search algorithm. At each trial point it evaluates the + criterion function and its gradient to find a search direction. It then approximates + the hessian using the stored history of gradients and uses the hessian to calculate + a candidate step size. Then it uses a gradient based line search algorithm to + determine the actual step length. 
Since the algorithm always evaluates the gradient + and criterion function jointly, the user should provide a ``fun_and_jac`` function + that exploits the synergies in the calculation of criterion and gradient. + + The lbfgsb algorithm is almost perfectly scale invariant. Thus, it is not necessary + to scale the parameters. + + """ +``` + +**What makes this docstring effective:** + +- **Clear first line**: States exactly what the algorithm does +- **Implementation details**: Explains it uses scipy's Fortran implementation +- **Algorithm classification**: Identifies it as a quasi-Newton method +- **Problem suitability**: Explains what problems it's good for +- **How it works**: Brief explanation of the algorithm's approach +- **Performance characteristics**: Mentions scale invariance +- **Usage advice**: Suggests using `fun_and_jac` for efficiency + +### Step 3: Document Individual Parameters + +Each parameter needs clear documentation explaining what it controls and how it affects +the algorithm's behavior. + +```python +# Basic parameter documentation +stopping_maxiter: PositiveInt = STOPPING_MAXITER +"""Maximum number of iterations.""" + +# Parameter with mathematical formulation +convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL +r"""Converge if the relative change in the objective function is less than this +value. More formally, this is expressed as. + +.. math:: + + \frac{f^k - f^{k+1}}{\max\{|f^k|, |f^{k+1}|, 1\}} \leq + \textsf{convergence_ftol_rel}. + +""" + +# Parameter with external library context +limited_memory_storage_length: PositiveInt = LIMITED_MEMORY_STORAGE_LENGTH +"""The maximum number of variable metric corrections used to define the limited +memory matrix. This is the 'maxcor' parameter in the SciPy documentation. + +The default value is taken from SciPy's L-BFGS-B implementation. Larger values use +more memory but may converge faster for some problems. 
+ +""" +``` + +**Key principles for parameter documentation:** + +- **Start with a clear description** of what the parameter controls +- **Add mathematical formulations** when they clarify the exact meaning (use `r"""` for + raw strings with LaTeX) +- **Include external library context** when relevant (e.g., "Default value is taken from + SciPy") +- **Explain performance implications** when they matter +- **Use proper type annotations** that match the parameter's constraints + +### Step 4: Integrate into `algorithms.md` + +The final step is integrating your documented algorithm into the main documentation. +This creates a dropdown section that shows users how to use the algorithm. + +Add the following to `docs/source/algorithms.md` in an `eval-rst` block: + +```text +.. dropdown:: scipy_lbfgsb + + **How to use this algorithm:** + + .. code-block:: python + + import optimagic as om + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm=om.algos.scipy_lbfgsb(stopping_maxiter=1_000, ...), + ) + + or using the string interface: + + .. code-block:: python + + om.minimize( + fun=lambda x: x @ x, + params=[1.0, 2.0, 3.0], + algorithm="scipy_lbfgsb", + algo_options={"stopping_maxiter": 1_000, ...}, + ) + + **Description and available options:** + + .. 
autoclass:: optimagic.optimizers.scipy_optimizers.ScipyLBFGSB +``` + +**What this section provides:** + +- **The dropdown button and title**: Makes it easy to find the algorithm +- **Concrete usage examples** showing both the object and string interfaces +- **Algorithm-specific parameter** in the usage example +- **Auto-generated documentation** via the `autoclass` directive that pulls in your + docstrings + +## Working with Existing Documentation + +If you're migrating an algorithm that already has documentation: + +### Finding Existing Content + +Look for existing documentation in: + +- **Algorithm class docstrings**: Usually basic descriptions +- **`docs/source/algorithms.md`**: Detailed parameter descriptions and examples +- **Research papers**: For mathematical formulations and background +- **External library docs**: For default values and parameter meanings + +### Migration Strategy + +1. **Start with the algorithm class**: Move the best description from `algorithms.md` to + the class docstring +1. **Update and expand**: Add missing information about performance, usage, etc. +1. **Move parameter docs**: Transfer parameter descriptions from `algorithms.md` to + individual parameter docstrings +1. **Verify accuracy**: Check that all information is current and correct +1. 
**Create new integration**: Replace the old `algorithms.md` section with the new + dropdown format + +## Common Pitfalls to Avoid + +- **Don't copy-paste generic descriptions**: Each algorithm needs specific, detailed + documentation +- **Don't skip mathematical formulations**: When convergence criteria or parameters have + precise mathematical definitions, include them +- **Don't ignore external library context**: Always mention where default values come + from +- **Don't use vague parameter descriptions**: "Controls the algorithm behavior" is not + helpful +- **Don't forget performance implications**: Users need to understand trade-offs between + parameters + +## Getting Help + +If you're stuck or need clarification: + +- Look at existing well-documented algorithms like `ScipyLBFGSB` +- Check the {ref}`style_guide` for coding conventions +- Ask questions in GitHub issues or discussions + +The goal is to make optimagic's algorithm documentation the best resource for +understanding and using optimization algorithms effectively. 
diff --git a/docs/source/how_to/index.md b/docs/source/how_to/index.md index 2a2f362d5..911762e43 100644 --- a/docs/source/how_to/index.md +++ b/docs/source/how_to/index.md @@ -25,4 +25,5 @@ how_to_errors_during_optimization how_to_slice_plot how_to_benchmarking how_to_add_optimizers +how_to_document_optimizers ``` diff --git a/docs/source/reference/algo_options.md b/docs/source/reference/algo_options.md index 2336c8003..367644521 100644 --- a/docs/source/reference/algo_options.md +++ b/docs/source/reference/algo_options.md @@ -1,4 +1,4 @@ -(algo_options_docs)= +(algo_options)= # The default algorithm options diff --git a/docs/source/reference/index.md b/docs/source/reference/index.md index 3ee07cee1..728a29d38 100644 --- a/docs/source/reference/index.md +++ b/docs/source/reference/index.md @@ -218,4 +218,5 @@ maxdepth: 1 utilities algo_options batch_evaluators +typing ``` diff --git a/docs/source/reference/typing.md b/docs/source/reference/typing.md new file mode 100644 index 000000000..1a13cdf6f --- /dev/null +++ b/docs/source/reference/typing.md @@ -0,0 +1,10 @@ +(typing)= + +# Types + +```{eval-rst} + +.. automodule:: optimagic.typing + :members: + +``` diff --git a/src/optimagic/optimizers/_pounders/bntr.py b/src/optimagic/optimizers/_pounders/bntr.py index fe039e7cb..341cfcaa9 100644 --- a/src/optimagic/optimizers/_pounders/bntr.py +++ b/src/optimagic/optimizers/_pounders/bntr.py @@ -67,9 +67,9 @@ def bntr( x_candidate (np.ndarray): Initial guess for the solution of the subproblem. conjugate_gradient_method (str): Method for computing the conjugate gradient step. Available conjugate gradient methods are: - - "cg" - - "steihaug_toint" - - "trsbox" (default) + - "cg" + - "steihaug_toint" + - "trsbox" (default) maxiter (int): Maximum number of iterations. If reached, terminate. maxiter_gradient_descent (int): Maximum number of steepest descent iterations to perform when the trust-region subsolver BNTR is used. 
diff --git a/src/optimagic/optimizers/_pounders/pounders_auxiliary.py b/src/optimagic/optimizers/_pounders/pounders_auxiliary.py index 223c028fe..f3c73177f 100644 --- a/src/optimagic/optimizers/_pounders/pounders_auxiliary.py +++ b/src/optimagic/optimizers/_pounders/pounders_auxiliary.py @@ -36,7 +36,7 @@ def create_initial_residual_model(history, accepted_index, delta): Returns: ResidualModel: Residual model containing the initial parameters for - ``linear_terms`` and ``square_terms``. + ``linear_terms`` and ``square_terms``. """ center_info = { @@ -221,9 +221,9 @@ def solve_subproblem( - "gqtpar" (does not support bound constraints) conjugate_gradient_method (str): Method for computing the conjugate gradient step. Available conjugate gradient methods are: - - "cg" - - "steihaug_toint" - - "trsbox" (default) + - "cg" + - "steihaug_toint" + - "trsbox" (default) maxiter (int): Maximum number of iterations to perform when solving the trust-region subproblem. maxiter_gradient_descent (int): Maximum number of gradient descent iterations @@ -336,7 +336,7 @@ def find_affine_points( If *project_x_onto_null* is False, it is an array filled with zeros. project_x_onto_null (int): Indicator whether to calculate the QR decomposition of *model_improving_points* and multiply it - with vector *x_projected*. + with vector *x_projected*. delta (float): Delta, current trust-region radius. theta1 (float): Threshold for adding the current x candidate to the model. c (float): Threshold for acceptance of the norm of our current x candidate. diff --git a/src/optimagic/optimizers/pounders.py b/src/optimagic/optimizers/pounders.py index 0ad500cb8..2fa2650b2 100644 --- a/src/optimagic/optimizers/pounders.py +++ b/src/optimagic/optimizers/pounders.py @@ -246,9 +246,9 @@ def internal_solve_pounders( conjugate_gradient_method_sub (str): Method for computing the conjugate gradient step ("bntr"). 
Available conjugate gradient methods are: - - "cg" - - "steihaug_toint" - - "trsbox" (default) + - "cg" + - "steihaug_toint" + - "trsbox" (default) maxiter_sub (int): Maximum number of iterations in the trust-region subproblem. maxiter_gradient_descent_sub (int): Maximum number of gradient descent iterations to perform ("bntr"). diff --git a/src/optimagic/optimizers/scipy_optimizers.py b/src/optimagic/optimizers/scipy_optimizers.py index f5bdcecbd..b35aef29d 100644 --- a/src/optimagic/optimizers/scipy_optimizers.py +++ b/src/optimagic/optimizers/scipy_optimizers.py @@ -33,6 +33,8 @@ """ +from __future__ import annotations + import functools from dataclasses import dataclass from typing import Any, Callable, List, Literal, SupportsInt, Tuple @@ -101,12 +103,68 @@ ) @dataclass(frozen=True) class ScipyLBFGSB(Algorithm): + """Minimize a scalar differentiable function using the L-BFGS-B algorithm. + + The optimizer is taken from scipy, which calls the Fortran code written by the + original authors of the algorithm. The Fortran code includes the corrections + and improvements that were introduced in a follow up paper. + + lbfgsb is a limited memory version of the original bfgs algorithm, that deals with + lower and upper bounds via an active set approach. + + The lbfgsb algorithm is well suited for differentiable scalar optimization problems + with up to several hundred parameters. + + It is a quasi-newton line search algorithm. At each trial point it evaluates the + criterion function and its gradient to find a search direction. It then approximates + the hessian using the stored history of gradients and uses the hessian to calculate + a candidate step size. Then it uses a gradient based line search algorithm to + determine the actual step length. Since the algorithm always evaluates the gradient + and criterion function jointly, the user should provide a ``fun_and_jac`` function + that exploits the synergies in the calculation of criterion and gradient. 
+ + The lbfgsb algorithm is almost perfectly scale invariant. Thus, it is not necessary + to scale the parameters. + + """ + convergence_ftol_rel: NonNegativeFloat = CONVERGENCE_FTOL_REL + r"""Converge if the relative change in the objective function is less than this + value. More formally, this is expressed as. + + .. math:: + + \frac{f^k - f^{k+1}}{\max\{|f^k|, |f^{k+1}|, 1\}} \leq + \textsf{convergence_ftol_rel}. + + """ + convergence_gtol_abs: NonNegativeFloat = CONVERGENCE_GTOL_ABS + """Converge if the absolute values in the gradient of the objective function are + less than this value.""" + stopping_maxfun: PositiveInt = STOPPING_MAXFUN + """Maximum number of function evaluations.""" + stopping_maxiter: PositiveInt = STOPPING_MAXITER + """Maximum number of iterations.""" + limited_memory_storage_length: PositiveInt = LIMITED_MEMORY_STORAGE_LENGTH + """The maximum number of variable metric corrections used to define the limited + memory matrix. This is the 'maxcor' parameter in the SciPy documentation. + + The default value is taken from SciPy's L-BFGS-B implementation. Larger values use + more memory but may converge faster for some problems. + + """ + max_line_search_steps: PositiveInt = MAX_LINE_SEARCH_STEPS + """The maximum number of line search steps. This is the 'maxls' parameter in the + SciPy documentation. + + The default value is taken from SciPy's L-BFGS-B implementation. 
+ + """ def _solve_internal_problem( self, problem: InternalOptimizationProblem, x0: NDArray[np.float64] diff --git a/src/optimagic/typing.py b/src/optimagic/typing.py index 0b3ce93a3..443bad959 100644 --- a/src/optimagic/typing.py +++ b/src/optimagic/typing.py @@ -115,16 +115,28 @@ def __call__( PositiveInt = Annotated[int, Gt(0)] +"""Type alias for positive integers (greater than 0).""" NonNegativeInt = Annotated[int, Ge(0)] +"""Type alias for non-negative integers (greater than or equal to 0).""" PositiveFloat = Annotated[float, Gt(0)] +"""Type alias for positive floats (greater than 0).""" NonNegativeFloat = Annotated[float, Ge(0)] +"""Type alias for non-negative floats (greater than or equal to 0).""" NegativeFloat = Annotated[float, Lt(0)] +"""Type alias for negative floats (less than 0).""" GtOneFloat = Annotated[float, Gt(1)] +"""Type alias for floats greater than 1.""" UnitIntervalFloat = Annotated[float, Gt(0), Le(1)] +"""Type alias for floats in (0, 1].""" YesNoBool = Literal["yes", "no"] | bool +"""Type alias for boolean values represented as 'yes' or 'no' strings or as boolean +values.""" DirectionLiteral = Literal["minimize", "maximize"] +"""Type alias for optimization direction, either 'minimize' or 'maximize'.""" BatchEvaluatorLiteral = Literal["joblib", "pathos", "threading"] +"""Type alias for batch evaluator types, can be 'joblib', 'pathos', or 'threading'.""" ErrorHandlingLiteral = Literal["raise", "continue"] +"""Type alias for error handling strategies, can be 'raise' or 'continue'.""" @dataclass(frozen=True)