diff --git a/src/optimagic/optimization/algorithm.py b/src/optimagic/optimization/algorithm.py
index 7f776cf90..a06fcd120 100644
--- a/src/optimagic/optimization/algorithm.py
+++ b/src/optimagic/optimization/algorithm.py
@@ -10,12 +10,16 @@
 
 from optimagic.exceptions import InvalidAlgoInfoError, InvalidAlgoOptionError
 from optimagic.logging.types import StepStatus
+from optimagic.optimization.convergence_report import get_convergence_report
 from optimagic.optimization.history import History
 from optimagic.optimization.internal_optimization_problem import (
     InternalOptimizationProblem,
 )
+from optimagic.optimization.optimize_result import OptimizeResult
+from optimagic.parameters.conversion import Converter
 from optimagic.type_conversion import TYPE_CONVERTERS
-from optimagic.typing import AggregationLevel
+from optimagic.typing import AggregationLevel, Direction, ExtraResultFields
+from optimagic.utilities import isscalar
 
 
 @dataclass(frozen=True)
@@ -82,7 +86,6 @@ class InternalOptimizeResult:
     max_constraint_violation: float | None = None
     info: dict[str, typing.Any] | None = None
     history: History | None = None
-    multistart_info: dict[str, typing.Any] | None = None
 
     def __post_init__(self) -> None:
         report: list[str] = []
@@ -142,6 +145,56 @@ def __post_init__(self) -> None:
             )
             raise TypeError(msg)
 
+    def create_optimize_result(
+        self,
+        converter: Converter,
+        solver_type: AggregationLevel,
+        extra_fields: ExtraResultFields,
+    ) -> OptimizeResult:
+        """Process an internal optimizer result."""
+        params = converter.params_from_internal(self.x)
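+        # Aggregate fun to a scalar: likelihood contributions are summed,
+        # least-squares residuals are squared and summed.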
+        if isscalar(self.fun):
+            fun = float(self.fun)
+        elif solver_type == AggregationLevel.LIKELIHOOD:
+            fun = float(np.sum(self.fun))
+        elif solver_type == AggregationLevel.LEAST_SQUARES:
+            fun = float(np.dot(self.fun, self.fun))
+
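+        # The internal problem is always minimized; restore the sign for maximization.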
+        if extra_fields.direction == Direction.MAXIMIZE:
+            fun = -fun
+
+        if self.history is not None:
+            conv_report = get_convergence_report(
+                history=self.history, direction=extra_fields.direction
+            )
+        else:
+            conv_report = None
+
+        out = OptimizeResult(
+            params=params,
+            fun=fun,
+            start_fun=extra_fields.start_fun,
+            start_params=extra_fields.start_params,
+            algorithm=extra_fields.algorithm,
+            direction=extra_fields.direction.value,
+            n_free=extra_fields.n_free,
+            message=self.message,
+            success=self.success,
+            n_fun_evals=self.n_fun_evals,
+            n_jac_evals=self.n_jac_evals,
+            n_hess_evals=self.n_hess_evals,
+            n_iterations=self.n_iterations,
+            status=self.status,
+            jac=self.jac,
+            hess=self.hess,
+            hess_inv=self.hess_inv,
+            max_constraint_violation=self.max_constraint_violation,
+            history=self.history,
+            algorithm_output=self.info,
+            convergence_report=conv_report,
+        )
+        return out
+
 
 class AlgorithmMeta(ABCMeta):
     """Metaclass to get repr, algo_info and name for classes, not just instances."""
@@ -234,7 +287,7 @@ def solve_internal_problem(
         problem: InternalOptimizationProblem,
         x0: NDArray[np.float64],
         step_id: int,
-    ) -> InternalOptimizeResult:
+    ) -> OptimizeResult:
         problem = problem.with_new_history().with_step_id(step_id)
 
         if problem.logger:
@@ -242,17 +295,32 @@ def solve_internal_problem(
                 step_id, {"status": str(StepStatus.RUNNING.value)}
             )
 
-        result = self._solve_internal_problem(problem, x0)
+        raw_res = self._solve_internal_problem(problem, x0)
 
-        if (not self.algo_info.disable_history) and (result.history is None):
-            result = replace(result, history=problem.history)
+        if (not self.algo_info.disable_history) and (raw_res.history is None):
+            raw_res = replace(raw_res, history=problem.history)
 
         if problem.logger:
             problem.logger.step_store.update(
                 step_id, {"status": str(StepStatus.COMPLETE.value)}
             )
 
-        return result
+        # Make sure the start params stored in the extra result fields match the x0
+        # actually used; in multistart, local optimizations start from sampled points.
+        extra_fields = problem.static_result_fields
+        x0_problem = problem.converter.params_to_internal(extra_fields.start_params)
+        if not np.allclose(x0_problem, x0):
+            start_params = problem.converter.params_from_internal(x0)
+            extra_fields = replace(
+                extra_fields, start_params=start_params, start_fun=None
+            )
+
+        res = raw_res.create_optimize_result(
+            converter=problem.converter,
+            solver_type=self.algo_info.solver_type,
+            extra_fields=extra_fields,
+        )
+
+        return res
 
     def with_option_if_applicable(self, **kwargs: Any) -> Self:
         """Call with_option only with applicable keyword arguments."""
diff --git a/src/optimagic/optimization/internal_optimization_problem.py b/src/optimagic/optimization/internal_optimization_problem.py
index f0951df74..0c9a9fc9b 100644
--- a/src/optimagic/optimization/internal_optimization_problem.py
+++ b/src/optimagic/optimization/internal_optimization_problem.py
@@ -23,6 +23,7 @@
     Direction,
     ErrorHandling,
     EvalTask,
+    ExtraResultFields,
     PyTree,
 )
 
@@ -55,6 +56,7 @@ def __init__(
         linear_constraints: list[dict[str, Any]] | None,
         nonlinear_constraints: list[dict[str, Any]] | None,
         logger: LogStore[Any, Any] | None,
+        static_result_fields: ExtraResultFields,
         # TODO: add hess and hessp
     ):
         self._fun = fun
@@ -73,6 +75,7 @@ def __init__(
         self._nonlinear_constraints = nonlinear_constraints
         self._logger = logger
         self._step_id: int | None = None
+        self._static_result_fields = static_result_fields
 
     # ==================================================================================
     # Public methods used by optimizers
@@ -218,6 +221,14 @@ def bounds(self) -> InternalBounds:
     def logger(self) -> LogStore[Any, Any] | None:
         return self._logger
 
+    @property
+    def converter(self) -> Converter:
+        return self._converter
+
+    @property
+    def static_result_fields(self) -> ExtraResultFields:
+        return self._static_result_fields
+
     # ==================================================================================
     # Implementation of the public functions; The main difference is that the lower-
     # level implementations return a history entry instead of adding it to the history
diff --git a/src/optimagic/optimization/multistart.py b/src/optimagic/optimization/multistart.py
index c3d4cf3e1..c36a9d54b 100644
--- a/src/optimagic/optimization/multistart.py
+++ b/src/optimagic/optimization/multistart.py
@@ -12,7 +12,6 @@
 """
 
 import warnings
-from dataclasses import replace
 from typing import Literal
 
 import numpy as np
@@ -21,7 +20,7 @@
 
 from optimagic.logging.logger import LogStore
 from optimagic.logging.types import StepStatus
-from optimagic.optimization.algorithm import Algorithm, InternalOptimizeResult
+from optimagic.optimization.algorithm import Algorithm
 from optimagic.optimization.internal_optimization_problem import (
     InternalBounds,
     InternalOptimizationProblem,
@@ -30,6 +29,8 @@
 from optimagic.optimization.optimization_logging import (
     log_scheduled_steps_and_get_ids,
 )
+from optimagic.optimization.optimize_result import OptimizeResult
+from optimagic.optimization.process_multistart_result import process_multistart_result
 from optimagic.typing import AggregationLevel, ErrorHandling
 from optimagic.utilities import get_rng
 
@@ -42,7 +43,7 @@ def run_multistart_optimization(
     options: InternalMultistartOptions,
     logger: LogStore | None,
     error_handling: ErrorHandling,
-) -> InternalOptimizeResult:
+) -> OptimizeResult:
     steps = determine_steps(options.n_samples, stopping_maxopt=options.stopping_maxopt)
 
     scheduled_steps = log_scheduled_steps_and_get_ids(
@@ -159,6 +160,7 @@ def single_optimization(x0, step_id):
             results=batch_results,
             convergence_criteria=convergence_criteria,
             solver_type=local_algorithm.algo_info.solver_type,
+            converter=internal_problem.converter,
         )
         opt_counter += len(batch)
         if is_converged:
@@ -168,15 +170,20 @@ def single_optimization(x0, step_id):
                     logger.step_store.update(step, {"status": new_status})
             break
 
-    multistart_info = {
-        "start_parameters": state["start_history"],
-        "local_optima": state["result_history"],
-        "exploration_sample": sorted_sample,
-        "exploration_results": exploration_res["sorted_values"],
-    }
-
     raw_res = state["best_res"]
-    res = replace(raw_res, multistart_info=multistart_info)
+
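+    # Map the exploration sample back to the external parameter representation.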
+    expl_sample = [
+        internal_problem.converter.params_from_internal(s) for s in sorted_sample
+    ]
+    expl_res = list(exploration_res["sorted_values"])
+
+    res = process_multistart_result(
+        raw_res=raw_res,
+        extra_fields=internal_problem.static_result_fields,
+        local_optima=state["result_history"],
+        exploration_sample=expl_sample,
+        exploration_results=expl_res,
+    )
 
     return res
 
@@ -371,7 +378,12 @@ def get_batched_optimization_sample(sorted_sample, stopping_maxopt, batch_size):
 
 
 def update_convergence_state(
-    current_state, starts, results, convergence_criteria, solver_type
+    current_state,
+    starts,
+    results,
+    convergence_criteria,
+    solver_type,
+    converter,
 ):
     """Update the state of all quantities related to convergence.
 
@@ -389,6 +401,7 @@ def update_convergence_state(
         convergence_criteria (dict): Dict with the entries "xtol" and "max_discoveries"
         solver_type: The aggregation level of the local optimizer. Needed to
             interpret the output of the internal criterion function.
+        converter: The converter to map between internal and external parameter spaces.
 
 
     Returns:
@@ -422,7 +435,7 @@ def update_convergence_state(
     # ==================================================================================
     valid_results = [results[i] for i in valid_indices]
     valid_starts = [starts[i] for i in valid_indices]
-    valid_new_x = [res.x for res in valid_results]
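+    # Results are external OptimizeResults, so map params back to the internal space.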
+    valid_new_x = [converter.params_to_internal(res.params) for res in valid_results]
     valid_new_y = []
 
     # make the criterion output scalar if a least squares optimizer returns an
diff --git a/src/optimagic/optimization/optimize.py b/src/optimagic/optimization/optimize.py
index 7935de635..07f557767 100644
--- a/src/optimagic/optimization/optimize.py
+++ b/src/optimagic/optimization/optimize.py
@@ -48,11 +48,6 @@
 )
 from optimagic.optimization.optimization_logging import log_scheduled_steps_and_get_ids
 from optimagic.optimization.optimize_result import OptimizeResult
-from optimagic.optimization.process_results import (
-    ExtraResultFields,
-    process_multistart_result,
-    process_single_result,
-)
 from optimagic.parameters.bounds import Bounds
 from optimagic.parameters.conversion import (
     get_converter,
@@ -64,6 +59,7 @@
     Direction,
     ErrorHandling,
     ErrorHandlingLiteral,
+    ExtraResultFields,
     NonNegativeFloat,
     PyTree,
 )
@@ -543,18 +539,6 @@ def _optimize(problem: OptimizationProblem) -> OptimizeResult:
         add_soft_bounds=problem.multistart is not None,
     )
 
-    # ==================================================================================
-    # initialize the log database
-    # ==================================================================================
-    logger: LogStore[Any, Any] | None
-
-    if problem.logging:
-        logger = LogStore.from_options(problem.logging)
-        problem_data = ProblemInitialization(problem.direction, problem.params)
-        logger.problem_store.insert(problem_data)
-    else:
-        logger = None
-
     # ==================================================================================
     # Do some things that require internal parameters or bounds
     # ==================================================================================
@@ -583,12 +567,37 @@ def _optimize(problem: OptimizationProblem) -> OptimizeResult:
         numdiff_options=problem.numdiff_options,
         skip_checks=problem.skip_checks,
     )
+    # Define static information that will be added to the OptimizeResult
+    _scalar_start_criterion = cast(
+        float, first_crit_eval.internal_value(AggregationLevel.SCALAR)
+    )
+    extra_fields = ExtraResultFields(
+        start_fun=_scalar_start_criterion,
+        start_params=problem.params,
+        algorithm=problem.algorithm.algo_info.name,
+        direction=problem.direction,
+        n_free=internal_params.free_mask.sum(),
+    )
 
+    # Create x and internal bounds
     x = internal_params.values
     internal_bounds = InternalBounds(
         lower=internal_params.lower_bounds,
         upper=internal_params.upper_bounds,
     )
+
+    # ==================================================================================
+    # initialize the log database
+    # ==================================================================================
+    logger: LogStore[Any, Any] | None
+
+    if problem.logging:
+        logger = LogStore.from_options(problem.logging)
+        problem_data = ProblemInitialization(problem.direction, problem.params)
+        logger.problem_store.insert(problem_data)
+    else:
+        logger = None
+
     # ==================================================================================
     # Create a batch evaluator
     # ==================================================================================
@@ -616,6 +625,7 @@ def _optimize(problem: OptimizationProblem) -> OptimizeResult:
         linear_constraints=None,
         nonlinear_constraints=internal_nonlinear_constraints,
         logger=logger,
+        static_result_fields=extra_fields,
     )
 
     # ==================================================================================
@@ -630,7 +640,7 @@ def _optimize(problem: OptimizationProblem) -> OptimizeResult:
             logger=logger,
         )[0]
 
-        raw_res = problem.algorithm.solve_internal_problem(internal_problem, x, step_id)
+        res = problem.algorithm.solve_internal_problem(internal_problem, x, step_id)
 
     else:
         multistart_options = get_internal_multistart_options_from_public(
@@ -644,7 +654,7 @@ def _optimize(problem: OptimizationProblem) -> OptimizeResult:
             upper=internal_params.soft_upper_bounds,
         )
 
-        raw_res = run_multistart_optimization(
+        res = run_multistart_optimization(
             local_algorithm=problem.algorithm,
             internal_problem=internal_problem,
             x=x,
@@ -655,37 +665,10 @@ def _optimize(problem: OptimizationProblem) -> OptimizeResult:
         )
 
     # ==================================================================================
-    # Process the result
+    # Add the log reader to the result
     # ==================================================================================
 
-    _scalar_start_criterion = cast(
-        float, first_crit_eval.internal_value(AggregationLevel.SCALAR)
-    )
     log_reader: LogReader[Any] | None
-
-    extra_fields = ExtraResultFields(
-        start_fun=_scalar_start_criterion,
-        start_params=problem.params,
-        algorithm=problem.algorithm.algo_info.name,
-        direction=problem.direction,
-        n_free=internal_params.free_mask.sum(),
-    )
-
-    if problem.multistart is None:
-        res = process_single_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=problem.algorithm.algo_info.solver_type,
-            extra_fields=extra_fields,
-        )
-    else:
-        res = process_multistart_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=problem.algorithm.algo_info.solver_type,
-            extra_fields=extra_fields,
-        )
-
     if logger is not None:
         assert problem.logging is not None
         log_reader = LogReader.from_options(problem.logging)
diff --git a/src/optimagic/optimization/optimize_result.py b/src/optimagic/optimization/optimize_result.py
index 5b692fc92..8c2a0fdfb 100644
--- a/src/optimagic/optimization/optimize_result.py
+++ b/src/optimagic/optimization/optimize_result.py
@@ -41,7 +41,7 @@ class OptimizeResult:
 
     params: Any
     fun: float
-    start_fun: float
+    start_fun: float | None
     start_params: Any
     algorithm: str
     direction: str
@@ -78,7 +78,7 @@ def criterion(self) -> float:
         return self.fun
 
     @property
-    def start_criterion(self) -> float:
+    def start_criterion(self) -> float | None:
         msg = (
             "The start_criterion attribute is deprecated. Use the start_fun attribute "
             "instead."
diff --git a/src/optimagic/optimization/process_multistart_result.py b/src/optimagic/optimization/process_multistart_result.py
new file mode 100644
index 000000000..7d10ba0bc
--- /dev/null
+++ b/src/optimagic/optimization/process_multistart_result.py
@@ -0,0 +1,84 @@
+import numpy as np
+from numpy.typing import NDArray
+
+from optimagic.optimization.convergence_report import get_convergence_report
+from optimagic.optimization.optimize_result import MultistartInfo, OptimizeResult
+from optimagic.typing import Direction, ExtraResultFields
+
+
+def process_multistart_result(
+    raw_res: OptimizeResult,
+    extra_fields: ExtraResultFields,
+    local_optima: list[OptimizeResult],
+    exploration_sample: list[NDArray[np.float64]],
+    exploration_results: list[float],
+) -> OptimizeResult:
+    """Process results of internal optimizers."""
+
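+    # raw_res can be a traceback string if no local optimization succeeded.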
+    if isinstance(raw_res, str):
+        res = _dummy_result_from_traceback(raw_res, extra_fields)
+    else:
+        res = raw_res
+        if extra_fields.direction == Direction.MAXIMIZE:
+            exploration_results = [-v for v in exploration_results]
+
+        info = MultistartInfo(
+            start_parameters=[opt.start_params for opt in local_optima],
+            local_optima=local_optima,
+            exploration_sample=exploration_sample,
+            exploration_results=exploration_results,
+        )
+
+        # ==============================================================================
+        # create a convergence report for the multistart optimization; This is not
+        # the same as the convergence report for the individual local optimizations.
+        # ==============================================================================
+        crit_hist = [opt.fun for opt in info.local_optima]
+        params_hist = [opt.params for opt in info.local_optima]
+        time_hist = [np.nan for opt in info.local_optima]
+        hist = {"criterion": crit_hist, "params": params_hist, "runtime": time_hist}
+
+        conv_report = get_convergence_report(
+            history=hist,
+            direction=extra_fields.direction,
+        )
+
+        res.convergence_report = conv_report
+
+        res.algorithm = f"multistart_{res.algorithm}"
+        res.n_iterations = _sum_or_none([opt.n_iterations for opt in info.local_optima])
+
+        res.n_fun_evals = _sum_or_none([opt.n_fun_evals for opt in info.local_optima])
+        res.n_jac_evals = _sum_or_none([opt.n_jac_evals for opt in info.local_optima])
+
+        res.multistart_info = info
+    return res
+
+
+def _dummy_result_from_traceback(
+    candidate: str, extra_fields: ExtraResultFields
+) -> OptimizeResult:
+    if extra_fields.start_fun is None:
+        start_fun = np.inf
+    else:
+        start_fun = extra_fields.start_fun
+
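+    # Fall back to the start params and fun so callers still get a valid result.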
+    out = OptimizeResult(
+        params=extra_fields.start_params,
+        fun=start_fun,
+        start_fun=start_fun,
+        start_params=extra_fields.start_params,
+        algorithm=extra_fields.algorithm,
+        direction=extra_fields.direction.value,
+        n_free=extra_fields.n_free,
+        message=candidate,
+    )
+    return out
+
+
+def _sum_or_none(summands: list[int | None | float]) -> int | None:
+    if any(s is None for s in summands):
+        out = None
+    else:
+        out = int(np.array(summands).sum())
+    return out
diff --git a/src/optimagic/optimization/process_results.py b/src/optimagic/optimization/process_results.py
deleted file mode 100644
index 0817649f5..000000000
--- a/src/optimagic/optimization/process_results.py
+++ /dev/null
@@ -1,193 +0,0 @@
-from dataclasses import dataclass, replace
-from typing import Any
-
-import numpy as np
-
-from optimagic.optimization.algorithm import InternalOptimizeResult
-from optimagic.optimization.convergence_report import get_convergence_report
-from optimagic.optimization.optimize_result import MultistartInfo, OptimizeResult
-from optimagic.parameters.conversion import Converter
-from optimagic.typing import AggregationLevel, Direction, PyTree
-from optimagic.utilities import isscalar
-
-
-@dataclass(frozen=True)
-class ExtraResultFields:
-    """Fields for OptimizeResult that are not part of InternalOptimizeResult."""
-
-    start_fun: float
-    start_params: PyTree
-    algorithm: str
-    direction: Direction
-    n_free: int
-
-
-def process_single_result(
-    raw_res: InternalOptimizeResult,
-    converter: Converter,
-    solver_type: AggregationLevel,
-    extra_fields: ExtraResultFields,
-) -> OptimizeResult:
-    """Process an internal optimizer result."""
-    params = converter.params_from_internal(raw_res.x)
-    if isscalar(raw_res.fun):
-        fun = float(raw_res.fun)
-    elif solver_type == AggregationLevel.LIKELIHOOD:
-        fun = float(np.sum(raw_res.fun))
-    elif solver_type == AggregationLevel.LEAST_SQUARES:
-        fun = np.dot(raw_res.fun, raw_res.fun)
-
-    if extra_fields.direction == Direction.MAXIMIZE:
-        fun = -fun
-
-    if raw_res.history is not None:
-        conv_report = get_convergence_report(
-            history=raw_res.history, direction=extra_fields.direction
-        )
-    else:
-        conv_report = None
-
-    out = OptimizeResult(
-        params=params,
-        fun=fun,
-        start_fun=extra_fields.start_fun,
-        start_params=extra_fields.start_params,
-        algorithm=extra_fields.algorithm,
-        direction=extra_fields.direction.value,
-        n_free=extra_fields.n_free,
-        message=raw_res.message,
-        success=raw_res.success,
-        n_fun_evals=raw_res.n_fun_evals,
-        n_jac_evals=raw_res.n_jac_evals,
-        n_hess_evals=raw_res.n_hess_evals,
-        n_iterations=raw_res.n_iterations,
-        status=raw_res.status,
-        jac=raw_res.jac,
-        hess=raw_res.hess,
-        hess_inv=raw_res.hess_inv,
-        max_constraint_violation=raw_res.max_constraint_violation,
-        history=raw_res.history,
-        algorithm_output=raw_res.info,
-        convergence_report=conv_report,
-    )
-    return out
-
-
-def process_multistart_result(
-    raw_res: InternalOptimizeResult,
-    converter: Converter,
-    solver_type: AggregationLevel,
-    extra_fields: ExtraResultFields,
-) -> OptimizeResult:
-    """Process results of internal optimizers.
-
-    Args:
-        res (dict): Results dictionary of an internal optimizer or multistart optimizer.
-
-    """
-    if raw_res.multistart_info is None:
-        raise ValueError("Multistart info is missing.")
-
-    if isinstance(raw_res, str):
-        res = _dummy_result_from_traceback(raw_res, extra_fields)
-    else:
-        res = process_single_result(
-            raw_res=raw_res,
-            converter=converter,
-            solver_type=solver_type,
-            extra_fields=extra_fields,
-        )
-
-        info = _process_multistart_info(
-            raw_res.multistart_info,
-            converter=converter,
-            solver_type=solver_type,
-            extra_fields=extra_fields,
-        )
-
-        # ==============================================================================
-        # create a convergence report for the multistart optimization; This is not
-        # the same as the convergence report for the individual local optimizations.
-        # ==============================================================================
-        crit_hist = [opt.fun for opt in info.local_optima]
-        params_hist = [opt.params for opt in info.local_optima]
-        time_hist = [np.nan for opt in info.local_optima]
-        hist = {"criterion": crit_hist, "params": params_hist, "runtime": time_hist}
-
-        conv_report = get_convergence_report(
-            history=hist,
-            direction=extra_fields.direction,
-        )
-
-        res.convergence_report = conv_report
-
-        res.algorithm = f"multistart_{res.algorithm}"
-        res.n_iterations = _sum_or_none([opt.n_iterations for opt in info.local_optima])
-
-        res.n_fun_evals = _sum_or_none([opt.n_fun_evals for opt in info.local_optima])
-        res.n_jac_evals = _sum_or_none([opt.n_jac_evals for opt in info.local_optima])
-
-        res.multistart_info = info
-    return res
-
-
-def _process_multistart_info(
-    info: dict[str, Any],
-    converter: Converter,
-    solver_type: AggregationLevel,
-    extra_fields: ExtraResultFields,
-) -> MultistartInfo:
-    starts = [converter.params_from_internal(x) for x in info["start_parameters"]]
-
-    optima = []
-    for res, start in zip(info["local_optima"], starts, strict=False):
-        replacements = {
-            "start_params": start,
-            "start_fun": None,
-        }
-
-        processed = process_single_result(
-            res,
-            converter=converter,
-            solver_type=solver_type,
-            extra_fields=replace(extra_fields, **replacements),
-        )
-        optima.append(processed)
-
-    sample = [converter.params_from_internal(x) for x in info["exploration_sample"]]
-
-    if extra_fields.direction == Direction.MINIMIZE:
-        exploration_res = info["exploration_results"]
-    else:
-        exploration_res = [-res for res in info["exploration_results"]]
-
-    return MultistartInfo(
-        start_parameters=starts,
-        local_optima=optima,
-        exploration_sample=sample,
-        exploration_results=exploration_res,
-    )
-
-
-def _dummy_result_from_traceback(
-    candidate: str, extra_fields: ExtraResultFields
-) -> OptimizeResult:
-    out = OptimizeResult(
-        params=extra_fields.start_params,
-        fun=extra_fields.start_fun,
-        start_fun=extra_fields.start_fun,
-        start_params=extra_fields.start_params,
-        algorithm=extra_fields.algorithm,
-        direction=extra_fields.direction.value,
-        n_free=extra_fields.n_free,
-        message=candidate,
-    )
-    return out
-
-
-def _sum_or_none(summands: list[int | None | float]) -> int | None:
-    if any(s is None for s in summands):
-        out = None
-    else:
-        out = int(np.array(summands).sum())
-    return out
diff --git a/src/optimagic/typing.py b/src/optimagic/typing.py
index 889152f79..b51f2a2df 100644
--- a/src/optimagic/typing.py
+++ b/src/optimagic/typing.py
@@ -156,3 +156,14 @@ class MultiStartIterationHistory(TupleLikeAccess):
     history: IterationHistory
     local_histories: list[IterationHistory] | None = None
     exploration: IterationHistory | None = None
+
+
+@dataclass(frozen=True)
+class ExtraResultFields:
+    """Fields for OptimizeResult that are not part of InternalOptimizeResult."""
+
+    start_fun: float | None
+    start_params: PyTree
+    algorithm: str
+    direction: Direction
+    n_free: int
diff --git a/src/optimagic/visualization/history_plots.py b/src/optimagic/visualization/history_plots.py
index 4c4797b53..dbd84ebb6 100644
--- a/src/optimagic/visualization/history_plots.py
+++ b/src/optimagic/visualization/history_plots.py
@@ -344,7 +344,7 @@ def _extract_plotting_data_from_results_object(
                 res.multistart_info.exploration_sample[::-1] + stacked["params"]
             )
             stacked["criterion"] = (
-                res.multistart_info.exploration_results.tolist()[::-1]
+                list(res.multistart_info.exploration_results)[::-1]
                 + stacked["criterion"]
             )
     else:
diff --git a/tests/optimagic/optimization/test_internal_optimization_problem.py b/tests/optimagic/optimization/test_internal_optimization_problem.py
index a0bb24a25..4c7f1f571 100644
--- a/tests/optimagic/optimization/test_internal_optimization_problem.py
+++ b/tests/optimagic/optimization/test_internal_optimization_problem.py
@@ -18,11 +18,29 @@
     InternalOptimizationProblem,
 )
 from optimagic.parameters.conversion import Converter
-from optimagic.typing import AggregationLevel, Direction, ErrorHandling, EvalTask
+from optimagic.typing import (
+    AggregationLevel,
+    Direction,
+    ErrorHandling,
+    EvalTask,
+    ExtraResultFields,
+)
+
+
+@pytest.fixture
+def extra_fields():
+    out = ExtraResultFields(
+        start_fun=100,
+        start_params=np.arange(3),
+        algorithm="bla",
+        direction=Direction.MINIMIZE,
+        n_free=3,
+    )
+    return out
 
 
 @pytest.fixture
-def base_problem():
+def base_problem(extra_fields):
     """Set up a basic InternalOptimizationProblem that can be modified for tests."""
 
     def fun(params):
@@ -72,6 +90,7 @@ def fun_and_jac(params):
         linear_constraints=linear_constraints,
         nonlinear_constraints=nonlinear_constraints,
         logger=None,
+        static_result_fields=extra_fields,
     )
 
     return problem
@@ -413,7 +432,7 @@ def test_max_problem_exploration_fun(max_problem):
 
 
 @pytest.fixture
-def pytree_problem(base_problem):
+def pytree_problem(extra_fields):
     def fun(params):
         assert isinstance(params, dict)
         return LeastSquaresFunctionValue(value=params)
@@ -479,6 +498,7 @@ def derivative_flatten(tree, x):
         linear_constraints=linear_constraints,
         nonlinear_constraints=nonlinear_constraints,
         logger=None,
+        static_result_fields=extra_fields,
     )
 
     return problem
@@ -543,7 +563,7 @@ def test_numerical_fun_and_jac_for_pytree_problem(pytree_problem):
 
 
 @pytest.fixture
-def error_min_problem():
+def error_min_problem(extra_fields):
     """Set up a basic InternalOptimizationProblem that can be modified for tests."""
 
     def fun(params):
@@ -603,6 +623,7 @@ def fun_and_jac(params):
         linear_constraints=linear_constraints,
         nonlinear_constraints=nonlinear_constraints,
         logger=None,
+        static_result_fields=extra_fields,
     )
 
     return problem
diff --git a/tests/optimagic/optimization/test_multistart.py b/tests/optimagic/optimization/test_multistart.py
index 06ec00236..b774c1ef7 100644
--- a/tests/optimagic/optimization/test_multistart.py
+++ b/tests/optimagic/optimization/test_multistart.py
@@ -6,13 +6,13 @@
 import pytest
 from numpy.testing import assert_array_almost_equal as aaae
 
-from optimagic.optimization.algorithm import InternalOptimizeResult
 from optimagic.optimization.multistart import (
     _draw_exploration_sample,
     get_batched_optimization_sample,
     run_explorations,
     update_convergence_state,
 )
+from optimagic.optimization.optimize_result import OptimizeResult
 
 
 @pytest.fixture()
@@ -129,13 +129,23 @@ def starts():
 
 @pytest.fixture()
 def results():
-    res = InternalOptimizeResult(
-        x=np.arange(3) + 1e-10,
+    res = OptimizeResult(
+        params=np.arange(3) + 1e-10,
         fun=4,
+        start_fun=5,
+        start_params=np.arange(3),
+        algorithm="bla",
+        direction="minimize",
+        n_free=3,
     )
     return [res]
 
 
+class DummyConverter:
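+    """Identity converter for tests: internal and external params coincide."""
+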
+    def params_to_internal(self, params):
+        return params
+
+
 def test_update_state_converged(current_state, starts, results):
     criteria = {
         "xtol": 1e-3,
@@ -148,6 +158,7 @@ def test_update_state_converged(current_state, starts, results):
         results=results,
         convergence_criteria=criteria,
         solver_type="value",
+        converter=DummyConverter(),
     )
 
     aaae(new_state["best_x"], np.arange(3))
@@ -171,6 +182,7 @@ def test_update_state_not_converged(current_state, starts, results):
         results=results,
         convergence_criteria=criteria,
         solver_type="value",
+        converter=DummyConverter(),
     )
 
     assert not is_converged
diff --git a/tests/optimagic/optimization/test_process_result.py b/tests/optimagic/optimization/test_process_multistart_result.py
similarity index 60%
rename from tests/optimagic/optimization/test_process_result.py
rename to tests/optimagic/optimization/test_process_multistart_result.py
index 9f0d66134..69acbac2d 100644
--- a/tests/optimagic/optimization/test_process_result.py
+++ b/tests/optimagic/optimization/test_process_multistart_result.py
@@ -1,4 +1,4 @@
-from optimagic.optimization.process_results import _sum_or_none
+from optimagic.optimization.process_multistart_result import _sum_or_none
 
 
 def test_sum_or_none():