diff --git a/.gitignore b/.gitignore index 98823b67..a95df910 100644 --- a/.gitignore +++ b/.gitignore @@ -155,9 +155,10 @@ cython_debug/ llm_rules.md .python-version -benchmarks/results/* +benchmarks/**/results +benchmarks/**/plots docs/api/_build/* docs/api/reference/* -examples/**/results/* +examples/**/results docs/general/**/data_* docs/site/* \ No newline at end of file diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 00000000..42da5535 --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,84 @@ +# Benchmarks + +Performance benchmarks compare Mesa Frames backends ("frames") with classic Mesa ("mesa") +implementations for a small set of representative models. They help track runtime scaling +and regressions. + +Currently included models: + +- **boltzmann**: Simple wealth exchange ("Boltzmann wealth") model. +- **sugarscape**: Sugarscape Immediate Growback variant (square grid sized relative to agent count). + +## Quick start + +```bash +uv run benchmarks/cli.py +``` + +That command (with defaults) will: + +- Benchmark both models (`boltzmann`, `sugarscape`). +- Use agent counts 1000, 2000, 3000, 4000, 5000. +- Run 100 steps per simulation. +- Repeat each configuration once. +- Save CSV results and generate plots. + +## CLI options + +Invoke `uv run benchmarks/cli.py --help` to see full help. Key options: + +| Option | Default | Description | +| ------ | ------- | ----------- | +| `--models` | `all` | Comma list or `all`; accepted: `boltzmann`, `sugarscape`. | +| `--agents` | `1000:5000:1000` | Single int or range `start:stop:step`. | +| `--steps` | `100` | Steps per simulation run. | +| `--repeats` | `1` | How many repeats per (model, backend, agents) config. Seed increments per repeat. | +| `--seed` | `42` | Base RNG seed. Incremented by repeat index. | +| `--save / --no-save` | `--save` | Persist per‑model CSVs. | +| `--plot / --no-plot` | `--plot` | Generate scaling plots (PNG + possibly other formats). | +| `--results-dir` | `benchmarks/results` | Root directory that will receive a timestamped subdirectory. | + +Range parsing: `A:B:S` includes `A, A+S, ... <= B`. Final value > B is dropped. + +## Output layout + +Each invocation uses a single UTC timestamp, e.g. `20251016_173702`: + +```text +benchmarks/ + results/ + 20251016_173702/ + boltzmann_perf_20251016_173702.csv + sugarscape_perf_20251016_173702.csv + plots/ + boltzmann_runtime_20251016_173702_dark.png + sugarscape_runtime_20251016_173702_dark.png + ... (other themed variants if enabled) +``` + +CSV schema (one row per completed run): + +| Column | Meaning | +| ------ | ------- | +| `model` | Model key (`boltzmann`, `sugarscape`). | +| `backend` | `mesa` or `frames`. | +| `agents` | Agent count for that run. | +| `steps` | Steps simulated. | +| `seed` | Seed used (base seed + repeat index). | +| `repeat_idx` | Repeat counter starting at 0. | +| `runtime_seconds` | Wall-clock runtime for that run. | +| `timestamp` | Shared timestamp identifier for the benchmark batch. | + +## Performance tips + +- Ensure the environment variable `MESA_FRAMES_RUNTIME_TYPECHECKING` is **unset** or set to `0` / `false` when collecting performance numbers. Enabling it adds runtime type validation overhead and the CLI will warn you. +- Run multiple repeats (`--repeats 5`) to smooth variance. + +## Extending benchmarks + +To benchmark an additional model: + +1. Add or import both a Mesa implementation and a Frames implementation exposing a `simulate(agents:int, steps:int, seed:int|None, ...)` function. +2. 
Register it in `benchmarks/cli.py` inside the `MODELS` dict with two backends (names must be `mesa` and `frames`). +3. Ensure any extra spatial parameters are derived from `agents` inside the runner lambda (see sugarscape example). +4. Run the CLI to verify new CSV columns still align. diff --git a/benchmarks/cli.py b/benchmarks/cli.py new file mode 100644 index 00000000..f97e99c4 --- /dev/null +++ b/benchmarks/cli.py @@ -0,0 +1,285 @@ +"""Typer CLI for running mesa vs mesa-frames performance benchmarks.""" + +from __future__ import annotations + +from dataclasses import dataclass +from datetime import datetime, timezone +import os +from pathlib import Path +from time import perf_counter +from typing import Literal, Annotated, Protocol, Optional + +import math +import polars as pl +import typer + +from examples.boltzmann_wealth import backend_frames as boltzmann_frames +from examples.boltzmann_wealth import backend_mesa as boltzmann_mesa +from examples.sugarscape_ig.backend_frames import model as sugarscape_frames +from examples.sugarscape_ig.backend_mesa import model as sugarscape_mesa +from examples.plotting import ( + plot_performance as _examples_plot_performance, +) + +app = typer.Typer(add_completion=False) + + +class RunnerP(Protocol): + def __call__(self, agents: int, steps: int, seed: int | None = None) -> None: ... + + +@dataclass(slots=True) +class Backend: + name: Literal["mesa", "frames"] + runner: RunnerP + + +@dataclass(slots=True) +class ModelConfig: + name: str + backends: list[Backend] + + +MODELS: dict[str, ModelConfig] = { + "boltzmann": ModelConfig( + name="boltzmann", + backends=[ + Backend(name="mesa", runner=boltzmann_mesa.simulate), + Backend(name="frames", runner=boltzmann_frames.simulate), + ], + ), + "sugarscape": ModelConfig( + name="sugarscape", + backends=[ + Backend( + name="mesa", + runner=lambda agents, steps, seed=None: sugarscape_mesa.simulate( + agents=agents, + steps=steps, + width=int(max(20, math.ceil((agents) ** 0.5) * 2)), + height=int(max(20, math.ceil((agents) ** 0.5) * 2)), + seed=seed, + ), + ), + Backend( + name="frames", + # Benchmarks expect a runner signature (agents:int, steps:int, seed:int|None) + # Sugarscape frames simulate requires width/height; choose square close to agent count. 
+ runner=lambda agents, steps, seed=None: sugarscape_frames.simulate( + agents=agents, + steps=steps, + width=int(max(20, math.ceil((agents) ** 0.5) * 2)), + height=int(max(20, math.ceil((agents) ** 0.5) * 2)), + seed=seed, + ), + ), + ], + ), +} + + +def _parse_agents(value: str) -> list[int]: + value = value.strip() + if ":" in value: + parts = value.split(":") + if len(parts) != 3: + raise typer.BadParameter("Ranges must use start:stop:step format") + try: + start, stop, step = (int(part) for part in parts) + except ValueError as exc: + raise typer.BadParameter("Range values must be integers") from exc + if step <= 0: + raise typer.BadParameter("Step must be positive") + # We keep start = 0 to benchmark initialization time + if start < 0 or stop <= 0: + raise typer.BadParameter("Range endpoints must be positive") + if start > stop: + raise typer.BadParameter("Range start must be <= stop") + counts = list(range(start, stop + step, step)) + if counts[-1] > stop: + counts.pop() + return counts + try: + agents = int(value) + except ValueError as exc: # pragma: no cover - defensive + raise typer.BadParameter("Agent count must be an integer") from exc + if agents <= 0: + raise typer.BadParameter("Agent count must be positive") + return [agents] + + +def _parse_models(value: str) -> list[str]: + """Parse models option into a list of model keys. + + Accepts: + - "all" -> returns all available model keys + - a single model name -> returns [name] + - a comma-separated list of model names -> returns list + + Validates that each selected model exists in MODELS. + """ + value = value.strip() + if value == "all": + return list(MODELS.keys()) + # support comma-separated lists + parts = [part.strip() for part in value.split(",") if part.strip()] + if not parts: + raise typer.BadParameter("Model selection must not be empty") + unknown = [p for p in parts if p not in MODELS] + if unknown: + raise typer.BadParameter(f"Unknown model selection: {', '.join(unknown)}") + # preserve order and uniqueness + seen = set() + result: list[str] = [] + for p in parts: + if p not in seen: + seen.add(p) + result.append(p) + return result + + +def _plot_performance( + df: pl.DataFrame, model_name: str, output_dir: Path, timestamp: str +) -> None: + """Wrap examples.plotting.plot_performance to ensure consistent theming. + + The original benchmark implementation used simple seaborn styles (whitegrid / darkgrid). + Our example plotting utilities define a much darker, high-contrast *true* dark theme + (custom rc params overriding bg/fg colors). Reuse that logic here so the + benchmark dark plots match the example dark plots users see elsewhere. 
+ """ + if df.is_empty(): + return + stem = f"{model_name}_runtime_{timestamp}" + _examples_plot_performance( + df.select(["agents", "runtime_seconds", "backend"]), + output_dir=output_dir, + stem=stem, + # Prefer more concise, publication-style wording + title=f"{model_name.title()} runtime scaling", + ) + + +@app.command() +def run( + models: Annotated[ + str | list[str], + typer.Option( + help="Models to benchmark: boltzmann, sugarscape, or all", + callback=_parse_models, + ), + ] = "all", + agents: Annotated[ + str | list[int], + typer.Option( + help="Agent count or range (start:stop:step)", callback=_parse_agents + ), + ] = "1000:5000:1000", + steps: Annotated[ + int, + typer.Option( + min=0, + help="Number of steps per run.", + ), + ] = 100, + repeats: Annotated[int, typer.Option(help="Repeats per configuration.", min=1)] = 1, + seed: Annotated[int, typer.Option(help="Optional RNG seed.")] = 42, + save: Annotated[bool, typer.Option(help="Persist benchmark CSV results.")] = True, + plot: Annotated[bool, typer.Option(help="Render performance plots.")] = True, + results_dir: Annotated[ + Path | None, + typer.Option( + help=( + "Base directory for benchmark outputs. A timestamped subdirectory " + "(e.g. results/20250101_120000) is created with CSV files at the root " + "and a 'plots/' subfolder for images. Defaults to the module's results directory." + ), + ), + ] = None, +) -> None: + """Run performance benchmarks for the selected models.""" + # Support both CLI (via callbacks) and direct function calls + if isinstance(models, str): + models = _parse_models(models) + if isinstance(agents, str): + agents = _parse_agents(agents) + # Ensure module-relative default is computed at call time (avoids import-time side effects) + if results_dir is None: + results_dir = Path(__file__).resolve().parent / "results" + + runtime_typechecking = os.environ.get("MESA_FRAMES_RUNTIME_TYPECHECKING", "") + if runtime_typechecking and runtime_typechecking.lower() not in {"0", "false"}: + typer.secho( + "Warning: MESA_FRAMES_RUNTIME_TYPECHECKING is enabled; benchmarks may run significantly slower.", + fg=typer.colors.YELLOW, + ) + rows: list[dict[str, object]] = [] + # Single timestamp per CLI invocation so all model results are co-located. 
+ timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") + # Create unified output layout: //{CSV files, plots/} + base_results_dir = results_dir + timestamp_dir = (base_results_dir / timestamp).resolve() + plots_subdir: Path = timestamp_dir / "plots" + for model in models: + config = MODELS[model] + typer.echo(f"Benchmarking {model} with agents {agents}") + for agents_count in agents: + for repeat_idx in range(repeats): + run_seed = seed + repeat_idx + for backend in config.backends: + start = perf_counter() + backend.runner(agents_count, steps, run_seed) + runtime = perf_counter() - start + rows.append( + { + "model": model, + "backend": backend.name, + "agents": agents_count, + "steps": steps, + "seed": run_seed, + "repeat_idx": repeat_idx, + "runtime_seconds": runtime, + "timestamp": timestamp, + } + ) + # Report completion of this run to the CLI + typer.echo( + f"Completed {backend.name} for model={model} agents={agents_count} steps={steps} seed={run_seed} repeat={repeat_idx} in {runtime:.3f}s" + ) + # Finished all runs for this model + typer.echo(f"Finished benchmarking model {model}") + + if not rows: + typer.echo("No benchmark data collected.") + return + df = pl.DataFrame(rows) + if save: + timestamp_dir.mkdir(parents=True, exist_ok=True) + for model in models: + model_df = df.filter(pl.col("model") == model) + csv_path = timestamp_dir / f"{model}_perf_{timestamp}.csv" + model_df.write_csv(csv_path) + typer.echo(f"Saved {model} results to {csv_path}") + if plot: + plots_subdir.mkdir(parents=True, exist_ok=True) + for model in models: + model_df = df.filter(pl.col("model") == model) + _plot_performance(model_df, model, plots_subdir, timestamp) + typer.echo(f"Saved {model} plots under {plots_subdir}") + + destinations: list[str] = [] + if save: + destinations.append(f"CSVs under {timestamp_dir}") + if plot: + destinations.append(f"plots under {plots_subdir}") + + if destinations: + typer.echo("Unified benchmark outputs written: " + "; ".join(destinations)) + else: + typer.echo( + "Benchmark run completed (save=False, plot=False; no files written)." + ) + + +if __name__ == "__main__": + app() diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 00000000..359bbaf8 --- /dev/null +++ b/examples/README.md @@ -0,0 +1,106 @@ +# Examples + +This directory contains runnable example models and shared plotting/utilities +used in the tutorials and benchmarks. Each example provides **two backends**: + +- `mesa` (classic Mesa, object-per-agent) +- `frames` (Mesa Frames, vectorised agent sets / dataframe-centric) + +They expose a consistent Typer CLI so you can compare outputs and timings. + +## Contents + +```text +examples/ + boltzmann_wealth/ + backend_mesa.py # Mesa implementation + CLI (simulate() + run) + backend_frames.py # Frames implementation + CLI (simulate() + run) + sugarscape_ig/ + backend_mesa/ # Mesa Sugarscape (agents + model + CLI) + backend_frames/ # Frames Sugarscape (agents + model + CLI) + plotting.py # Shared plotting helpers (Seaborn + dark theme) + utils.py # Small dataclasses for simulation results +``` + +## Quick start + +Always run via `uv` from the project root. The simplest way to run an example +backend is to execute the module: + +```bash +uv run examples/boltzmann_wealth/backend_frames.py +``` + +Each command will: + +1. Print a short banner with configuration. +2. Run the simulation and show elapsed time. +3. Emit a tail of the collected metrics (e.g. last 5 Gini values). +4. 
Save CSV metrics and optional plots in a timestamped directory under that + example's `results/` folder (unless overridden by `--results-dir`). + +## CLI symmetry + +Both backends accept similar options: + +- `--agents` (population size) +- `--steps` (number of simulated steps) +- `--seed` (optional RNG seed; Mesa backend resets model RNG) +- `--plot / --no-plot` (toggle plot generation) +- `--save-results / --no-save-results` (persist CSV outputs) +- `--results-dir` (override auto-created timestamped folder) + +The Frames Boltzmann backend stores model metrics in a Polars DataFrame via +`mesa_frames.DataCollector`; the Mesa backend uses the standard `mesa.DataCollector` +returning pandas DataFrames, then converts to Polars only for plotting so plots +look identical. + +## Data and metrics + +The saved CSV layout (Frames) places `model.csv` in the results directory with +columns like: `step, gini, `. +The Mesa implementations write +compatible CSVs. + +## Plotting helpers + +`examples/plotting.py` provides: + +- `plot_model_metrics(df, output_dir, stem, title, subtitle, agents, steps)` + Produces dark theme line plots of model-level metrics (currently Gini) and + stores PNG files under `output_dir` with names like `gini__dark.png`. +- `plot_performance(df, output_dir, stem, title)` used by `benchmarks/cli.py` to + generate runtime scaling plots. + +The dark theme matches the styling used in the documentation for visual +consistency. + +## Interacting programmatically + +Instead of using the CLIs you can import the simulation entry points directly: + +```python +from examples.boltzmann_wealth import backend_frames as bw_frames +result = bw_frames.simulate(agents=2000, steps=100, seed=123) +polars_df = result.datacollector.data["model"] # Polars DataFrame of metrics +``` + +Each `simulate()` returns a small dataclass (`FramesSimulationResult` or +`MesaSimulationResult`) holding the respective `DataCollector` instance so you +can further analyse the collected data. + +## Tips + +- To compare backends fairly, disable runtime type checking when measuring performance: + set environment variable `MESA_FRAMES_RUNTIME_TYPECHECKING=0`. +- Use the same `--seed` across runs for reproducible trajectories (given the + stochastic nature of agent interactions). +- Larger Sugarscape grids (width/height) increase memory and runtime; choose + sizes proportional to the square root of agent count for balanced density. + +## Adding Examples + +You can adapt these scripts to prototype new models: copy a backend pair, +rename the module, and implement your agent rules while keeping the API +surface (`simulate`, `run`) consistent so tooling and documentation patterns +continue to apply. diff --git a/examples/__init__.py b/examples/__init__.py new file mode 100644 index 00000000..069e9dc5 --- /dev/null +++ b/examples/__init__.py @@ -0,0 +1,6 @@ +"""Examples package for the repository.""" + +__all__ = [ + "boltzmann_wealth", + "sugarscape_ig", +] diff --git a/examples/boltzmann_wealth/README.md b/examples/boltzmann_wealth/README.md new file mode 100644 index 00000000..51b3e260 --- /dev/null +++ b/examples/boltzmann_wealth/README.md @@ -0,0 +1,96 @@ +# Boltzmann Wealth Exchange Model + +## Overview + +This example implements a simple wealth exchange ("Boltzmann money") model in two +backends: + +- `backend_frames.py` (Mesa Frames / vectorised `AgentSet`) +- `backend_mesa.py` (classic Mesa / object-per-agent) + +Both expose a Typer CLI with symmetric options so you can compare correctness +and performance directly. 
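+
+As a quick correctness spot-check you can also drive both backends from Python with the
+same seed and compare the collected Gini series (a minimal sketch, run from the project
+root; exact values differ between backends because the parallel and sequential update
+rules are not identical, but both trajectories should rise from roughly zero):
+
+```python
+from examples.boltzmann_wealth import backend_frames, backend_mesa
+
+frames_res = backend_frames.simulate(agents=2000, steps=100, seed=7)
+mesa_res = backend_mesa.simulate(agents=2000, steps=100, seed=7)
+
+# Frames metrics are a Polars DataFrame; the Mesa DataCollector returns pandas.
+print(frames_res.datacollector.data["model"].select("step", "gini").tail(5))
+print(mesa_res.datacollector.get_model_vars_dataframe()[["gini"]].tail(5))
+```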
+ +## Concept + +Each agent starts with 1 unit of wealth. At every step: + +1. Frames backend: all agents with strictly positive wealth become potential donors. + Each donor gives 1 unit of wealth, and a recipient is drawn (with replacement) + for every donating agent. A single vectorised update applies donor losses and + recipient gains. +2. Mesa backend: agents are shuffled and iterate sequentially; each agent with + positive wealth transfers 1 unit to a randomly selected peer. + +The stochastic exchange process leads to an emergent, increasingly unequal +wealth distribution and rising Gini coefficient, typically approaching a stable +level below 1 (due to conservation and continued mixing). + +## Reported Metrics + +The model records per-step population Gini (`gini`). You can extend reporters by +adding lambdas to `model_reporters` in either backend's constructor. + +Notes on interpretation: + +- Early steps: Gini ~ 0 (uniform initial wealth). +- Mid phase: Increasing Gini as random exchanges concentrate wealth. +- Late phase: Fluctuating plateau (a stochastic steady state) — exact level + varies with agent count and RNG seed. + +## Running + +Always run examples from the project root using `uv`: + +```bash +uv run examples/boltzmann_wealth/backend_frames.py --agents 5000 --steps 200 --seed 123 --plot --save-results +uv run examples/boltzmann_wealth/backend_mesa.py --agents 5000 --steps 200 --seed 123 --plot --save-results +``` + +## CLI options + +- `--agents` Number of agents (default 5000) +- `--steps` Simulation steps (default 100) +- `--seed` Optional RNG seed for reproducibility +- `--plot / --no-plot` Generate line plot(s) of Gini +- `--save-results / --no-save-results` Persist CSV metrics +- `--results-dir` Override the auto-timestamped directory under `results/` + +Frames backend additionally warns if runtime type checking is enabled because it +slows vectorised operations: set `MESA_FRAMES_RUNTIME_TYPECHECKING=0` for fair +performance comparisons. + +## Outputs + +Each run creates (or uses) a results directory like: + +```text +examples/boltzmann_wealth/results/20251016_173702/ + model.csv # step,gini + gini__dark.png (and possibly other theme variants) +``` + +Tail metrics are printed to console for quick inspection: + +```text +Metrics in the final 5 steps: shape: (5, 2) +┌──────┬───────┐ +│ step ┆ gini │ +│ --- ┆ --- │ +│ i64 ┆ f64 │ +├──────┼───────┤ +│ ... ┆ ... │ +└──────┴───────┘ +``` + +## Performance & Benchmarking + +Use the shared benchmarking CLI to compare scaling, check out `benchmarks/README.md`. 
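+
+For example, to benchmark only this model across the default agent range with a few
+repeats (all flags shown here are documented in `benchmarks/README.md`):
+
+```bash
+uv run benchmarks/cli.py --models boltzmann --agents 1000:5000:1000 --steps 100 --repeats 3
+```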
+ +## Programmatic Use + +```python +from examples.boltzmann_wealth import backend_frames as bw_frames +result = bw_frames.simulate(agents=10000, steps=250, seed=42) +metrics = result.datacollector.data["model"] # Polars DataFrame +``` diff --git a/examples/boltzmann_wealth/__init__.py b/examples/boltzmann_wealth/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/examples/boltzmann_wealth/backend_frames.py b/examples/boltzmann_wealth/backend_frames.py new file mode 100644 index 00000000..a4403aa8 --- /dev/null +++ b/examples/boltzmann_wealth/backend_frames.py @@ -0,0 +1,188 @@ +"""Mesa-frames implementation of the Boltzmann wealth model with Typer CLI.""" + +from __future__ import annotations + +from datetime import datetime, timezone +from pathlib import Path +from typing import Annotated + +import numpy as np +import os +import polars as pl +import typer +from time import perf_counter + +from mesa_frames import AgentSet, DataCollector, Model +from examples.utils import FramesSimulationResult +from examples.plotting import plot_model_metrics + + +# Note: by default we create a timestamped results directory under `results/`. +# The CLI will accept optional `results_dir` and `plots_dir` arguments to override. + + +def gini(frame: pl.DataFrame) -> float: + wealth = frame["wealth"] if "wealth" in frame.columns else pl.Series([]) + if wealth.is_empty(): + return float("nan") + values = wealth.to_numpy().astype(np.float64) + if values.size == 0: + return float("nan") + if np.allclose(values, 0.0): + return 0.0 + if np.allclose(values, values[0]): + return 0.0 + sorted_vals = np.sort(values) + n = sorted_vals.size + cumulative = np.cumsum(sorted_vals) + total = cumulative[-1] + if total == 0: + return 0.0 + index = np.arange(1, n + 1, dtype=np.float64) + return float((2.0 * np.dot(index, sorted_vals) / (n * total)) - (n + 1) / n) + + +class MoneyAgents(AgentSet): + """Vectorised agent set for the Boltzmann wealth exchange model.""" + + def __init__(self, model: Model, agents: int) -> None: + super().__init__(model) + self += pl.DataFrame({"wealth": pl.Series(np.ones(agents, dtype=np.int64))}) + + def step(self) -> None: + self.select(pl.col("wealth") > 0) + if len(self.active_agents) == 0: + return + # Use the model RNG to seed Polars sampling so results are reproducible + recipients = self.df.sample( + n=len(self.active_agents), + with_replacement=True, + seed=self.random.integers(np.iinfo(np.int32).max), + ) + # Combine donor loss (1 per active agent) and recipient gains in a single adjustment. + gains = recipients.group_by("unique_id").len() + self.df = ( + self.df.join(gains, on="unique_id", how="left") + .with_columns( + ( + pl.col("wealth") + # each active agent loses 1 unit of wealth + + pl.when(pl.col("wealth") > 0).then(-1).otherwise(0) + # each agent gains 1 unit of wealth for each time they were selected as a recipient + + pl.col("len").fill_null(0) + ).alias("wealth") + ) + .drop("len") + ) + + +class MoneyModel(Model): + """Mesa-frames model that mirrors the Mesa implementation.""" + + def __init__( + self, agents: int, *, seed: int | None = None, results_dir: Path | None = None + ) -> None: + super().__init__(seed) + self.sets += MoneyAgents(self, agents) + # For benchmarks we frequently call simulate() without providing a results_dir. + # Persisting to disk would add unnecessary IO overhead and a missing storage_uri + # currently raises in DataCollector validation. 
Fallback to in-memory collection + # when no results_dir is supplied; otherwise write CSV files under results_dir. + if results_dir is None: + storage = "memory" + storage_uri = None + else: + storage = "csv" + storage_uri = str(results_dir) + self.datacollector = DataCollector( + model=self, + model_reporters={ + "gini": lambda m: gini(m.sets[0].df), + }, + storage=storage, + storage_uri=storage_uri, + ) + + def step(self) -> None: + self.sets.do("step") + self.datacollector.collect() + + def run(self, steps: int) -> None: + for _ in range(steps): + self.step() + + +def simulate( + agents: int, + steps: int, + seed: int | None = None, + results_dir: Path | None = None, +) -> FramesSimulationResult: + model = MoneyModel(agents, seed=seed, results_dir=results_dir) + model.run(steps) + # collect data from datacollector into memory first + return FramesSimulationResult(datacollector=model.datacollector) + + +app = typer.Typer(add_completion=False) + + +@app.command() +def run( + agents: Annotated[int, typer.Option(help="Number of agents to simulate.")] = 5000, + steps: Annotated[int, typer.Option(help="Number of model steps to run.")] = 100, + seed: Annotated[int | None, typer.Option(help="Optional RNG seed.")] = None, + plot: Annotated[bool, typer.Option(help="Render Seaborn plots.")] = True, + save_results: Annotated[bool, typer.Option(help="Persist metrics as CSV.")] = True, + results_dir: Annotated[ + Path | None, + typer.Option( + help="Directory to write CSV results and plots into. If omitted a timestamped subdir under `results/` is used." + ), + ] = None, +) -> None: + runtime_typechecking = os.environ.get("MESA_FRAMES_RUNTIME_TYPECHECKING", "") + if runtime_typechecking and runtime_typechecking.lower() not in {"0", "false"}: + typer.secho( + "Warning: MESA_FRAMES_RUNTIME_TYPECHECKING is enabled; this run will be slower.", + fg=typer.colors.YELLOW, + ) + typer.echo( + f"Running Boltzmann wealth model (mesa-frames) with {agents} agents for {steps} steps" + ) + timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") + if results_dir is None: + results_dir = ( + Path(__file__).resolve().parent / "results" / timestamp + ).resolve() + results_dir.mkdir(parents=True, exist_ok=True) + start_time = perf_counter() + result = simulate(agents=agents, steps=steps, seed=seed, results_dir=results_dir) + + typer.echo(f"Simulation complete in {perf_counter() - start_time:.2f} seconds") + + model_metrics = result.datacollector.data["model"].select("step", "gini") + + typer.echo(f"Metrics in the final 5 steps: {model_metrics.tail(5)}") + + if save_results: + result.datacollector.flush() + typer.echo(f"Saved CSV results under {results_dir}") + + if plot: + stem = f"gini_{timestamp}" + # write plots into the results directory so outputs are colocated + plot_model_metrics( + model_metrics, + results_dir, + stem, + title="Boltzmann wealth — Gini", + subtitle=f"mesa-frames backend; seed={result.datacollector.seed}", + agents=agents, + steps=steps, + ) + typer.echo(f"Saved plots under {results_dir}") + + +if __name__ == "__main__": + app() diff --git a/examples/boltzmann_wealth/backend_mesa.py b/examples/boltzmann_wealth/backend_mesa.py new file mode 100644 index 00000000..dda875ac --- /dev/null +++ b/examples/boltzmann_wealth/backend_mesa.py @@ -0,0 +1,178 @@ +"""Mesa implementation of the Boltzmann wealth model with Typer CLI.""" + +from __future__ import annotations + +from datetime import datetime, timezone +from pathlib import Path +from typing import Annotated +from collections.abc import 
Iterable + +import mesa +from mesa.datacollection import DataCollector +import numpy as np +import polars as pl +import typer +from time import perf_counter + +from examples.utils import MesaSimulationResult +from examples.plotting import plot_model_metrics + + +def gini(values: Iterable[float]) -> float: + """Compute the Gini coefficient from an iterable of wealth values.""" + array = np.fromiter(values, dtype=float) + if array.size == 0: + return float("nan") + if np.allclose(array, 0.0): + return 0.0 + if np.allclose(array, array[0]): + return 0.0 + sorted_vals = np.sort(array) + n = sorted_vals.size + cumulative = np.cumsum(sorted_vals) + total = cumulative[-1] + if total == 0: + return 0.0 + index = np.arange(1, n + 1, dtype=float) + return float((2.0 * np.dot(index, sorted_vals) / (n * total)) - (n + 1) / n) + + +class MoneyAgent(mesa.Agent): + """Agent that passes one unit of wealth to a random neighbour.""" + + def __init__(self, model: MoneyModel) -> None: + super().__init__(model) + self.wealth = 1 + + def step(self) -> None: + if self.wealth <= 0: + return + other = self.random.choice(self.model.agent_list) + if other is None: + return + other.wealth += 1 + self.wealth -= 1 + + +class MoneyModel(mesa.Model): + """Mesa backend that mirrors the mesa-frames Boltzmann wealth example.""" + + def __init__(self, agents: int, *, seed: int | None = None) -> None: + super().__init__() + if seed is None: + seed = self.random.randint(0, np.iinfo(np.int32).max) + self.reset_randomizer(seed) + self.agent_list: list[MoneyAgent] = [] + for _ in range(agents): + # NOTE: storing agents in a Python list keeps iteration fast for benchmarks. + agent = MoneyAgent(self) + self.agent_list.append(agent) + self.datacollector = DataCollector( + model_reporters={ + "gini": lambda m: gini(a.wealth for a in m.agent_list), + "seed": lambda m: seed, + } + ) + self.datacollector.collect(self) + + def step(self) -> None: + self.random.shuffle(self.agent_list) + for agent in self.agent_list: + agent.step() + self.datacollector.collect(self) + + def run(self, steps: int) -> None: + for _ in range(steps): + self.step() + + +def simulate(agents: int, steps: int, seed: int | None = None) -> MesaSimulationResult: + """Run the Mesa Boltzmann wealth model.""" + model = MoneyModel(agents, seed=seed) + model.run(steps) + + return MesaSimulationResult(datacollector=model.datacollector) + + +app = typer.Typer(add_completion=False) + + +@app.command() +def run( + agents: Annotated[int, typer.Option(help="Number of agents to simulate.")] = 5000, + steps: Annotated[int, typer.Option(help="Number of model steps to run.")] = 100, + seed: Annotated[int | None, typer.Option(help="Optional RNG seed.")] = None, + plot: Annotated[bool, typer.Option(help="Render plots.")] = True, + save_results: Annotated[ + bool, + typer.Option(help="Persist metrics as CSV."), + ] = True, + results_dir: Annotated[ + Path | None, + typer.Option( + help=( + "Directory to write CSV results and plots into. If omitted a " + "timestamped subdir under `results/` is used." 
+ ) + ), + ] = None, +) -> None: + """Execute the Mesa Boltzmann wealth simulation.""" + + typer.echo( + f"Running Boltzmann wealth model (mesa) with {agents} agents for {steps} steps" + ) + + # Resolve output folder + timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") + if results_dir is None: + results_dir = ( + Path(__file__).resolve().parent / "results" / timestamp + ).resolve() + results_dir.mkdir(parents=True, exist_ok=True) + + start_time = perf_counter() + # Run simulation (Mesa‑idiomatic): we only use DataCollector's public API + result = simulate(agents=agents, steps=steps, seed=seed) + typer.echo(f"Simulation completed in {perf_counter() - start_time:.3f} seconds") + dc = result.datacollector + + # ---- Extract metrics (no helper, no monkey‑patch): + # DataCollector returns a pandas DataFrame with the index as the step. + model_pd = dc.get_model_vars_dataframe() + model_pd = model_pd.reset_index() + # The first column is the step index; normalize name to "step". + model_pd = model_pd.rename(columns={model_pd.columns[0]: "step"}) + extracted_seed = model_pd["seed"].iloc[0] + model_pd = model_pd[["step", "gini"]] + + # Show a short tail in console for quick inspection + tail_str = model_pd.tail(5).to_string(index=False) + typer.echo(f"Metrics in the final 5 steps:\n{tail_str}") + + # ---- Save CSV (same filename/layout as frames backend expects) + if save_results: + csv_path = results_dir / "model.csv" + model_pd.to_csv(csv_path, index=False) + + # ---- Plot (convert to Polars to reuse the shared plotting helper) + if plot and not model_pd.empty: + model_pl = pl.from_pandas(model_pd) + stem = f"gini_{timestamp}" + plot_model_metrics( + model_pl, + results_dir, + stem, + title="Boltzmann wealth — Gini", + subtitle=f"mesa backend; seed={extracted_seed}", + agents=agents, + steps=steps, + ) + typer.echo(f"Saved plots under {results_dir}") + + if save_results: + typer.echo(f"Saved CSV results under {results_dir}") + + +if __name__ == "__main__": + app() diff --git a/examples/plotting.py b/examples/plotting.py new file mode 100644 index 00000000..b2a0473d --- /dev/null +++ b/examples/plotting.py @@ -0,0 +1,306 @@ +# examples/plotting.py +from __future__ import annotations + +from pathlib import Path +from collections.abc import Sequence +import re + +import polars as pl +import seaborn as sns +import matplotlib.pyplot as plt +from matplotlib.ticker import FormatStrFormatter +from matplotlib.figure import Figure +from matplotlib.axes import Axes + +# ----------------------------- Shared theme ---------------------------------- + +_THEMES = { + "light": dict( + style="whitegrid", + rc={ + "axes.spines.top": False, + "axes.spines.right": False, + "legend.facecolor": "#ffffff", + "legend.edgecolor": "#d0d0d0", + }, + ), + "dark": dict( + style="whitegrid", + rc={ + # real dark background + readable foreground + "figure.facecolor": "#0b1021", + "axes.facecolor": "#0b1021", + "axes.edgecolor": "#d6d6d7", + "axes.labelcolor": "#e8e8ea", + "text.color": "#e8e8ea", + "xtick.color": "#c9c9cb", + "ytick.color": "#c9c9cb", + "grid.color": "#2a2f4a", + "grid.alpha": 0.35, + "axes.spines.top": False, + "axes.spines.right": False, + "legend.facecolor": "#121734", + "legend.edgecolor": "#3b3f5a", + }, + ), +} + + +def _shorten_seed(text: str | None) -> str | None: + """Turn '... seed=1234567890123' into '... 
seed=12345678…' if present.""" + if not text: + return text + m = re.search(r"seed=([^;,\s]+)", text) + if not m: + return text + raw = m.group(1) + short = (raw[:8] + "…") if len(raw) > 10 else raw + return re.sub(r"seed=[^;,\s]+", f"seed={short}", text) + + +def _apply_titles(fig: Figure, ax: Axes, title: str, subtitle: str | None) -> None: + """Consistent title placement: figure-level title + small italic subtitle.""" + fig.suptitle(title, fontsize=18, y=0.98) + ax.set_title(_shorten_seed(subtitle) or "", fontsize=12, fontstyle="italic", pad=4) + + +def _finalize_and_save(fig: Figure, output_dir: Path, stem: str, theme: str) -> None: + """Tight layout with space for suptitle, export PNG + (optional) SVG.""" + output_dir.mkdir(parents=True, exist_ok=True) + fig.tight_layout(rect=[0, 0, 1, 0.94]) + png = output_dir / f"{stem}_{theme}.png" + fig.savefig(png, dpi=300) + try: + fig.savefig(output_dir / f"{stem}_{theme}.svg", bbox_inches="tight") + except Exception: + pass # SVG is a nice-to-have + plt.close(fig) + + +# -------------------------- Public: model metrics ---------------------------- + + +def plot_model_metrics( + metrics: pl.DataFrame, + output_dir: Path, + stem: str, + title: str, + *, + subtitle: str = "", + figsize: tuple[int, int] | None = None, + agents: int | None = None, + steps: int | None = None, +) -> None: + """ + Plot time-series metrics from a Polars DataFrame and export light/dark PNG/SVG. + + - Auto-detects `step` or adds one if missing. + - Melts all non-`step` columns into long form. + - If there's a single metric (e.g., 'gini'), removes legend and uses a + descriptive y-axis label (e.g., 'Gini coefficient'). + - Optional `agents` and `steps` will be appended to the suptitle as + "(N=, T=)"; if `steps` is omitted it will be inferred + from the `step` column when available. + """ + if metrics.is_empty(): + return + + if "step" not in metrics.columns: + metrics = metrics.with_row_index("step") + + # If steps not provided, try to infer from the data (max step + 1). Keep it None if we can't determine it. 
+ if steps is None: + try: + steps = int(metrics.select(pl.col("step").max()).item()) + 1 + except Exception: + steps = None + + value_cols: Sequence[str] = [c for c in metrics.columns if c != "step"] + if not value_cols: + return + + long = ( + metrics.select(["step", *value_cols]) + .unpivot( + index="step", on=value_cols, variable_name="metric", value_name="value" + ) + .to_pandas() + ) + + # Compose informative title with optional (N, T) + if agents is not None and steps is not None: + full_title = f"{title} (N={agents}, T={steps})" + elif agents is not None: + full_title = f"{title} (N={agents})" + elif steps is not None: + full_title = f"{title} (T={steps})" + else: + full_title = title + + for theme, cfg in _THEMES.items(): + sns.set_theme(**cfg) + sns.set_context("talk") + fig, ax = plt.subplots(figsize=figsize or (10, 6)) + + sns.lineplot(data=long, x="step", y="value", hue="metric", linewidth=2, ax=ax) + + _apply_titles(fig, ax, full_title, subtitle) + + ax.set_xlabel("Step") + unique_metrics = long["metric"].unique() + + if len(unique_metrics) == 1: + name = unique_metrics[0] + ax.set_ylabel(name.capitalize()) + leg = ax.get_legend() + if leg is not None: + leg.remove() + vals = long.loc[long["metric"] == name, "value"] + if not vals.empty: + vmin, vmax = float(vals.min()), float(vals.max()) + pad = max(0.005, (vmax - vmin) * 0.05) + ax.set_ylim(vmin - pad, vmax + pad) + else: + ax.set_ylabel("Value") + leg = ax.get_legend() + if leg is not None: + # Remove redundant legend title and ensure a readable + # boxed background for the light theme (subtle) while + # keeping a slightly transparent frame for dark theme. + leg.set_title(None) + frame = leg.get_frame() + if theme == "dark": + frame.set_alpha(0.8) + else: + frame.set_alpha(0.9) + frame.set_edgecolor("#d0d0d0") + frame.set_linewidth(0.8) + + ax.yaxis.set_major_formatter(FormatStrFormatter("%.3f")) + ax.margins(x=0.01) + + _finalize_and_save(fig, output_dir, stem, theme) + + +# -------------------------- Public: agent metrics ---------------------------- + + +def plot_agent_metrics( + agent_metrics: pl.DataFrame, + output_dir: Path, + stem: str, + *, + title: str = "Agent metrics", + subtitle: str = "", + figsize: tuple[int, int] | None = None, +) -> None: + """ + Plot agent-level metrics (multi-series) and export light/dark PNG/SVG. + + - Preserves common id vars if present: `step`, `seed`, `batch`. + - Uses the first column as id if none of the preferred ids exist. + """ + if agent_metrics is None or agent_metrics.is_empty(): + return + + preferred = ["step", "seed", "batch"] + id_vars = [c for c in preferred if c in agent_metrics.columns] or [ + agent_metrics.columns[0] + ] + + # Determine which columns to unpivot (all columns except the id vars). 
+ value_cols = [c for c in agent_metrics.columns if c not in id_vars] + if not value_cols: + return + + melted = agent_metrics.unpivot( + index=id_vars, on=value_cols, variable_name="metric", value_name="value" + ).to_pandas() + + xcol = id_vars[0] + + for theme, cfg in _THEMES.items(): + sns.set_theme(**cfg) + sns.set_context("talk") + fig, ax = plt.subplots(figsize=figsize or (10, 6)) + + sns.lineplot(data=melted, x=xcol, y="value", hue="metric", linewidth=1.8, ax=ax) + + _apply_titles(fig, ax, title, subtitle) + ax.set_xlabel(xcol.capitalize()) + ax.set_ylabel("Value") + + leg = ax.get_legend() + if leg is not None: + leg.set_title(None) + frame = leg.get_frame() + if theme == "dark": + frame.set_alpha(0.8) + else: + frame.set_alpha(0.9) + frame.set_edgecolor("#d0d0d0") + frame.set_linewidth(0.8) + + _finalize_and_save(fig, output_dir, f"{stem}_agents", theme) + + +# -------------------------- Public: performance ------------------------------ + + +def plot_performance( + df: pl.DataFrame, + output_dir: Path, + stem: str, + *, + title: str = "Runtime vs agents", + subtitle: str = "", + figsize: tuple[int, int] | None = None, +) -> None: + """ + Plot backend performance (runtime vs agents) with mean±sd error bars. + Expected columns: `agents`, `runtime_seconds`, `backend`. + """ + if df.is_empty(): + return + + pdf = df.to_pandas() + + for theme, cfg in _THEMES.items(): + sns.set_theme(**cfg) + sns.set_context("talk") + fig, ax = plt.subplots(figsize=figsize or (10, 6)) + + sns.lineplot( + data=pdf, + x="agents", + y="runtime_seconds", + hue="backend", + estimator="mean", + errorbar="sd", + marker="o", + ax=ax, + ) + + _apply_titles(fig, ax, title, subtitle) + ax.set_xlabel("Agents") + ax.set_ylabel("Runtime (seconds)") + leg = ax.get_legend() + if leg is not None: + # Remove redundant legend title (backend) for both themes – label colors already distinguish. + leg.set_title(None) + frame = leg.get_frame() + if theme == "dark": + frame.set_alpha(0.8) + else: # light theme: subtle boxed legend for readability on white grid + frame.set_alpha(0.9) + frame.set_edgecolor("#d0d0d0") + frame.set_linewidth(0.8) + + _finalize_and_save(fig, output_dir, stem, theme) + + +__all__ = [ + "plot_agent_metrics", + "plot_model_metrics", + "plot_performance", +] diff --git a/examples/sugarscape_ig/README.md b/examples/sugarscape_ig/README.md new file mode 100644 index 00000000..d3734b21 --- /dev/null +++ b/examples/sugarscape_ig/README.md @@ -0,0 +1,105 @@ +# Sugarscape IG (Instant Growback) + +## Overview + +This directory contains a minimal Instant Growback Sugarscape implementation in +both backends: + +- `backend_frames/` parallel (vectorised) movement variant using Mesa Frames +- `backend_mesa/` sequential (asynchronous) movement variant using classic Mesa + +The Instant Growback (IG) rule sequence is: move -> eat -> regrow -> collect. +Agents harvest sugar, pay metabolism costs, possibly die (starve), and empty +cells instantly regrow to their `max_sugar` value. + +## Concept + +Each agent has integer traits: + +- `sugar` (current stores) +- `metabolism` (per-step consumption) +- `vision` (how far the agent can see in cardinal directions) + +Movement policy (both backends conceptually): + +1. Sense visible cells along N/E/S/W up to `vision` steps (including origin). +2. Rank candidate cells by: (a) sugar (desc), (b) distance (asc), (c) coordinates + as deterministic tie-breaker. +3. Choose highest-ranked empty cell; fall back to origin if none available. 
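+
+The ranking in step 2 boils down to a single multi-key sort. A minimal Polars sketch is
+shown below (the data and column names are illustrative; the actual frames implementation
+in `backend_frames/agents.py` builds the candidate frame from the grid neighbourhood):
+
+```python
+import polars as pl
+
+# One row per (agent, visible cell): sugar on the cell and distance from the agent.
+candidates = pl.DataFrame(
+    {
+        "agent_id": [1, 1, 1, 2],
+        "dim_0": [3, 4, 5, 9],
+        "dim_1": [7, 7, 7, 2],
+        "sugar": [4, 4, 1, 2],
+        "radius": [2, 1, 3, 0],
+    }
+)
+
+ranked = candidates.sort(
+    ["agent_id", "sugar", "radius", "dim_0", "dim_1"],
+    descending=[False, True, False, False, False],
+)
+# The first row per agent is its preferred destination.
+preferred = ranked.group_by("agent_id", maintain_order=True).first()
+print(preferred)
+```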
+ +The Frames parallel variant resolves conflicts by iterative lottery rounds using +rank promotion; the sequential Mesa variant inherently orders moves by shuffled +agent iteration. + +After moving, agents harvest sugar on their cell, pay metabolism, and starved +agents are removed. Empty cells regrow to their `max_sugar` value immediately. + +## Reported Metrics + +Both backends record population-level reporters each step: + +- `mean_sugar` Average sugar per surviving agent. +- `total_sugar` Aggregate sugar held by living agents. +- `agents_alive` Population size (declines as agents starve). +- `gini` Inequality in sugar holdings (0 = equal, higher = more unequal). +- `corr_sugar_metabolism` Pearson correlation (do high-metabolism agents retain sugar?). +- `corr_sugar_vision` Pearson correlation (does greater vision correlate with sugar?). + +Notes on interpretation: + +- `agents_alive` typically decreases until a quasi-steady state (metabolism vs regrowth) or total collapse. +- `mean_sugar` and `total_sugar` may stabilise if regrowth balances metabolism. +- Rising `gini` indicates emerging inequality; sustained high values suggest strong positional advantages. +- Correlations near 0 imply weak linear relationships; positive `corr_sugar_vision` suggests high-vision agents aid resource gathering. Negative `corr_sugar_metabolism` can emerge if high-metabolism agents accelerate starvation. + +## Running + +From project root using `uv`: + +```bash +uv run examples/sugarscape_ig/backend_frames/model.py --agents 400 --width 40 --height 40 --steps 60 --seed 123 --plot --save-results +uv run examples/sugarscape_ig/backend_mesa/model.py --agents 400 --width 40 --height 40 --steps 60 --seed 123 --plot --save-results +``` + +## CLI options + +- `--agents` Number of agents (default 400) +- `--width`, `--height` Grid dimensions (default 40x40) +- `--steps` Max steps (default 60) +- `--max-sugar` Initial/regrowth max sugar per cell (default 4) +- `--seed` Optional RNG seed +- `--plot / --no-plot` Generate per-metric plots +- `--save-results / --no-save-results` Persist CSV outputs +- `--results-dir` Override auto timestamped directory under `results/` + +Frames backend warns if `MESA_FRAMES_RUNTIME_TYPECHECKING` is enabled (disable for benchmarks). + +## Outputs + +Example output directory (frames): + +```text +examples/sugarscape_ig/backend_frames/results/20251016_173702/ + model.csv + plots/ + gini__dark.png + agents_alive__dark.png + mean_sugar__dark.png + ... +``` + +`model.csv` columns include: `step`, `mean_sugar`, `total_sugar`, `agents_alive`, +`gini`, `corr_sugar_metabolism`, `corr_sugar_vision`, plus backend-specific bookkeeping. +Mesa backend normalises to the same layout (excluding internal columns). + +## Performance & Benchmarking + +Use the shared benchmarking CLI to compare scaling, check out `benchmarks/README.md`. 
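+
+For example (the benchmark runner derives a square grid from the agent count, so no
+width/height flags are needed here; see `benchmarks/README.md` for all options):
+
+```bash
+uv run benchmarks/cli.py --models sugarscape --agents 1000:5000:1000 --repeats 3
+```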
+ +## Programmatic Use + +```python +from examples.sugarscape_ig.backend_frames import model as sg_frames +res = sg_frames.simulate(agents=500, steps=80, width=50, height=50, seed=42) +metrics = res.datacollector.data["model"] # Polars DataFrame +``` diff --git a/examples/sugarscape_ig/backend_frames/__init__.py b/examples/sugarscape_ig/backend_frames/__init__.py new file mode 100644 index 00000000..614fa64d --- /dev/null +++ b/examples/sugarscape_ig/backend_frames/__init__.py @@ -0,0 +1 @@ +"""mesa-frames backend package for Sugarscape IG examples.""" diff --git a/examples/sugarscape_ig/backend_frames/agents.py b/examples/sugarscape_ig/backend_frames/agents.py new file mode 100644 index 00000000..3a9c4932 --- /dev/null +++ b/examples/sugarscape_ig/backend_frames/agents.py @@ -0,0 +1,645 @@ +"""Agent implementations for the Sugarscape IG example (mesa-frames). + +This module provides the parallel (synchronous) movement variant as in the +advanced tutorial. The code and comments mirror +docs/general/tutorials/3_advanced_tutorial.py. +""" + +from __future__ import annotations + +import numpy as np +import polars as pl + +from mesa_frames import AgentSet, Model + + +class AntsBase(AgentSet): + """Base agent set for the Sugarscape tutorial. + + This class implements the common behaviour shared by all agent + movement variants (sequential, numba-accelerated and parallel). + + Notes + ----- + - Agents are expected to have integer traits: ``sugar``, ``metabolism`` + and ``vision``. These are validated in :meth:`__init__`. + - Subclasses must implement :meth:`move` which changes agent positions + on the grid (via :meth:`mesa_frames.Grid` helpers). + """ + + def __init__(self, model: Model, agent_frame: pl.DataFrame) -> None: + """Initialise the agent set and validate required trait columns. + + Parameters + ---------- + model : Model + The parent model which provides RNG and space. + agent_frame : pl.DataFrame + A Polars DataFrame with at least the columns ``sugar``, + ``metabolism`` and ``vision`` for each agent. + + Raises + ------ + ValueError + If required trait columns are missing from ``agent_frame``. + """ + super().__init__(model) + required = {"sugar", "metabolism", "vision"} + missing = required.difference(agent_frame.columns) + if missing: + raise ValueError( + f"Initial agent frame must include columns {sorted(required)}; missing {sorted(missing)}." + ) + self.add(agent_frame.clone()) + + def step(self) -> None: + """Advance the agent set by one time step. + + The update order is important: agents are first shuffled to randomise + move order (this is important only for sequential variants), then they move, harvest sugar + from their occupied cells, and finally any agents whose sugar falls + to zero or below are removed. + """ + # Randomise ordering for movement decisions when required by the + # implementation (e.g. sequential update uses this shuffle). + self.shuffle(inplace=True) + # Movement policy implemented by subclasses. + self.move() + # Agents harvest sugar on their occupied cells. + self.eat() + # Remove agents that starved after eating. + self._remove_starved() + + def move(self) -> None: # pragma: no cover + """Abstract movement method. + + Subclasses must override this method to update agent positions on the + grid. Implementations should use :meth:`mesa_frames.Grid.move_agents` + or similar helpers provided by the space API. + """ + raise NotImplementedError + + def eat(self) -> None: + """Agents harvest sugar from the cells they currently occupy. 
+ + Behaviour: + - Look up the set of occupied cells (cells that reference an agent + id). + - For each occupied cell, add the cell sugar to the agent's sugar + stock and subtract the agent's metabolism cost. + - After agents harvest, set the sugar on those cells to zero (they + were consumed). + """ + # Map of currently occupied agent ids on the grid. + occupied_ids = self.index + # `occupied_ids` is a Polars Series; calling `is_in` with a Series + # of the same datatype is ambiguous in newer Polars. Use `implode` + # to collapse the Series into a list-like value for membership checks. + occupied_cells = self.space.cells.filter( + pl.col("agent_id").is_in(occupied_ids.implode()) + ) + if occupied_cells.is_empty(): + return + # The agent ordering here uses the agent_id values stored in the + # occupied cells frame; indexing the agent set with that vector updates + # the matching agents' sugar values in one vectorised write. + agent_ids = occupied_cells["agent_id"] + self[agent_ids, "sugar"] = ( + self[agent_ids, "sugar"] + + occupied_cells["sugar"] + - self[agent_ids, "metabolism"] + ) + # After harvesting, occupied cells have zero sugar. + self.space.set_cells( + occupied_cells.select(["dim_0", "dim_1"]), + {"sugar": pl.Series(np.zeros(len(occupied_cells), dtype=np.int64))}, + ) + + def _remove_starved(self) -> None: + """Discard agents whose sugar stock has fallen to zero or below. + + This method performs a vectorised filter on the agent frame and + removes any matching rows from the set. + """ + starved = self.df.filter(pl.col("sugar") <= 0) + if not starved.is_empty(): + # ``discard`` accepts a DataFrame of agents to remove. + self.discard(starved) + + +class AntsParallel(AntsBase): + def move(self) -> None: + """Move agents in parallel by ranking visible cells and resolving conflicts. + + Declarative mental model: express *what* each agent wants (ranked candidates), + then use dataframe ops to *allocate* (joins, group_by with a lottery). + Performance is handled by Polars/LazyFrames; avoid premature micro-optimisations. + + Returns + ------- + None + Movement updates happen in-place on the underlying space. + """ + # Early exit if there are no agents. + if len(self.df) == 0: + return + + # current_pos columns: + # ┌──────────┬────────────────┬────────────────┐ + # │ agent_id ┆ dim_0_center ┆ dim_1_center │ + # │ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 │ + # ╞══════════╪════════════════╪════════════════╡ + current_pos = self.pos.select( + [ + pl.col("unique_id").alias("agent_id"), + pl.col("dim_0").alias("dim_0_center"), + pl.col("dim_1").alias("dim_1_center"), + ] + ) + + neighborhood = self._build_neighborhood_frame(current_pos) + choices, origins, max_rank = self._rank_candidates(neighborhood, current_pos) + if choices.is_empty(): + return + + assigned = self._resolve_conflicts_in_rounds(choices, origins, max_rank) + if assigned.is_empty(): + return + + # move_df columns: + # ┌────────────┬────────────┬────────────┐ + # │ unique_id ┆ dim_0 ┆ dim_1 │ + # │ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 │ + # ╞════════════╪════════════╪════════════╡ + move_df = pl.DataFrame( + { + "unique_id": assigned["agent_id"], + "dim_0": assigned["dim_0_candidate"], + "dim_1": assigned["dim_1_candidate"], + } + ) + # `move_agents` accepts IdsLike and SpaceCoordinates (Polars Series/DataFrame), + # so pass Series/DataFrame directly rather than converting to Python lists. 
+ self.space.move_agents(move_df["unique_id"], move_df.select(["dim_0", "dim_1"])) + + def _build_neighborhood_frame(self, current_pos: pl.DataFrame) -> pl.DataFrame: + """Assemble the sugar-weighted neighbourhood for each sensing agent. + + Parameters + ---------- + current_pos : pl.DataFrame + DataFrame with columns ``agent_id``, ``dim_0_center`` and + ``dim_1_center`` describing the current position of each agent. + + Returns + ------- + pl.DataFrame + DataFrame with columns ``agent_id``, ``radius``, ``dim_0_candidate``, + ``dim_1_candidate`` and ``sugar`` describing the visible cells for + each agent. + """ + # Build a neighbourhood frame: for each agent and visible cell we + # attach the cell sugar. The raw offsets contain the candidate + # cell coordinates and the center coordinates for the sensing agent. + # Raw neighborhood columns: + # ┌────────────┬────────────┬────────┬────────────────┬────────────────┐ + # │ dim_0 ┆ dim_1 ┆ radius ┆ dim_0_center ┆ dim_1_center │ + # │ --- ┆ --- ┆ --- ┆ --- ┆ --- │ + # │ i64 ┆ i64 ┆ i64 ┆ i64 ┆ i64 │ + # ╞════════════╪════════════╪════════╪════════════════╪════════════════╡ + neighborhood_cells = self.space.get_neighborhood( + radius=self["vision"], agents=self, include_center=True + ) + + # sugar_cells columns: + # ┌────────────┬────────────┬────────┐ + # │ dim_0 ┆ dim_1 ┆ sugar │ + # │ --- ┆ --- ┆ --- │ + # │ i64 ┆ i64 ┆ i64 │ + # ╞════════════╪════════════╪════════╡ + + sugar_cells = self.space.cells.select(["dim_0", "dim_1", "sugar"]) + + neighborhood_cells = ( + neighborhood_cells.join(sugar_cells, on=["dim_0", "dim_1"], how="left") + .with_columns(pl.col("sugar").fill_null(0)) + .rename({"dim_0": "dim_0_candidate", "dim_1": "dim_1_candidate"}) + ) + + neighborhood_cells = neighborhood_cells.join( + current_pos, + left_on=["dim_0_center", "dim_1_center"], + right_on=["dim_0_center", "dim_1_center"], + how="left", + ) + + # Final neighborhood columns: + # ┌──────────┬────────┬──────────────────┬──────────────────┬────────┐ + # │ agent_id ┆ radius ┆ dim_0_candidate ┆ dim_1_candidate ┆ sugar │ + # │ --- ┆ --- ┆ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 ┆ i64 ┆ i64 │ + # ╞══════════╪════════╪══════════════════╪══════════════════╪════════╡ + neighborhood_cells = neighborhood_cells.drop( + ["dim_0_center", "dim_1_center"] + ).select(["agent_id", "radius", "dim_0_candidate", "dim_1_candidate", "sugar"]) + + return neighborhood_cells + + def _rank_candidates( + self, + neighborhood: pl.DataFrame, + current_pos: pl.DataFrame, + ) -> tuple[pl.DataFrame, pl.DataFrame, pl.DataFrame]: + """Rank candidate destination cells for each agent. + + Parameters + ---------- + neighborhood : pl.DataFrame + Output of :meth:`_build_neighborhood_frame` with columns + ``agent_id``, ``radius``, ``dim_0_candidate``, ``dim_1_candidate`` + and ``sugar``. + current_pos : pl.DataFrame + Frame with columns ``agent_id``, ``dim_0_center`` and + ``dim_1_center`` describing where each agent currently stands. + + Returns + ------- + choices : pl.DataFrame + Ranked candidates per agent with columns ``agent_id``, + ``dim_0_candidate``, ``dim_1_candidate``, ``sugar``, ``radius`` and + ``rank``. + origins : pl.DataFrame + Original coordinates per agent with columns ``agent_id``, + ``dim_0`` and ``dim_1``. + max_rank : pl.DataFrame + Maximum available rank per agent with columns ``agent_id`` and + ``max_rank``. + """ + # Create ranked choices per agent: sort by sugar (desc), radius + # (asc), then coordinates. Keep the first unique entry per cell. 
+ # choices columns (after select): + # ┌──────────┬──────────────────┬──────────────────┬────────┬────────┐ + # │ agent_id ┆ dim_0_candidate ┆ dim_1_candidate ┆ sugar ┆ radius │ + # │ --- ┆ --- ┆ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 ┆ i64 ┆ i64 │ + # ╞══════════╪══════════════════╪══════════════════╪════════╪════════╡ + choices = ( + neighborhood.select( + [ + "agent_id", + "dim_0_candidate", + "dim_1_candidate", + "sugar", + "radius", + ] + ) + .sort( + ["agent_id", "sugar", "radius", "dim_0_candidate", "dim_1_candidate"], + descending=[False, True, False, False, False], + ) + .unique( + subset=["agent_id", "dim_0_candidate", "dim_1_candidate"], + keep="first", + maintain_order=True, + ) + .with_columns(pl.col("agent_id").cum_count().over("agent_id").alias("rank")) + ) + + # Precompute per‑agent candidate rank once so conflict resolution can + # promote losers by incrementing a cheap `current_rank` counter, + # without re-sorting after each round. Alternative: drop taken cells + # and re-rank by sugar every round; simpler conceptually but requires + # repeated sorts and deduplication, which is heavier than filtering by + # `rank >= current_rank`. + + # Origins for fallback (if an agent exhausts candidates it stays put). + # origins columns: + # ┌──────────┬────────────┬────────────┐ + # │ agent_id ┆ dim_0 ┆ dim_1 │ + # │ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 │ + # ╞══════════╪════════════╪════════════╡ + origins = current_pos.select( + [ + "agent_id", + pl.col("dim_0_center").alias("dim_0"), + pl.col("dim_1_center").alias("dim_1"), + ] + ) + + # Track the maximum available rank per agent to clamp promotions. + # This bounds `current_rank`; once an agent reaches `max_rank` and + # cannot secure a cell, they fall back to origin cleanly instead of + # chasing nonexistent ranks. + # max_rank columns: + # ┌──────────┬───────────┐ + # │ agent_id ┆ max_rank │ + # │ --- ┆ --- │ + # │ u64 ┆ u32 │ + # ╞══════════╪═══════════╡ + max_rank = choices.group_by("agent_id").agg( + pl.col("rank").max().alias("max_rank") + ) + return choices, origins, max_rank + + def _resolve_conflicts_in_rounds( + self, + choices: pl.DataFrame, + origins: pl.DataFrame, + max_rank: pl.DataFrame, + ) -> pl.DataFrame: + """Resolve movement conflicts through iterative lottery rounds. + + Parameters + ---------- + choices : pl.DataFrame + Ranked candidate cells per agent with headers matching the + ``choices`` frame returned by :meth:`_rank_candidates`. + origins : pl.DataFrame + Agent origin coordinates with columns ``agent_id``, ``dim_0`` and + ``dim_1``. + max_rank : pl.DataFrame + Maximum rank offset per agent with columns ``agent_id`` and + ``max_rank``. + + Returns + ------- + pl.DataFrame + Allocated movements with columns ``agent_id``, ``dim_0_candidate`` + and ``dim_1_candidate``; each row records the destination assigned + to an agent. + """ + # Prepare unresolved agents and working tables. 
+ agent_ids = choices["agent_id"].unique(maintain_order=True) + + # unresolved columns: + # ┌──────────┬────────────────┐ + # │ agent_id ┆ current_rank │ + # │ --- ┆ --- │ + # │ u64 ┆ i64 │ + # ╞══════════╪════════════════╡ + unresolved = pl.DataFrame( + { + "agent_id": agent_ids, + "current_rank": pl.Series(np.zeros(len(agent_ids), dtype=np.int64)), + } + ) + + # assigned columns: + # ┌──────────┬──────────────────┬──────────────────┐ + # │ agent_id ┆ dim_0_candidate ┆ dim_1_candidate │ + # │ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 │ + # ╞══════════╪══════════════════╪══════════════════╡ + assigned = pl.DataFrame( + { + "agent_id": pl.Series( + name="agent_id", values=[], dtype=agent_ids.dtype + ), + "dim_0_candidate": pl.Series( + name="dim_0_candidate", values=[], dtype=pl.Int64 + ), + "dim_1_candidate": pl.Series( + name="dim_1_candidate", values=[], dtype=pl.Int64 + ), + } + ) + + # taken columns: + # ┌──────────────────┬──────────────────┐ + # │ dim_0_candidate ┆ dim_1_candidate │ + # │ --- ┆ --- │ + # │ i64 ┆ i64 │ + # ╞══════════════════╪══════════════════╡ + # Treat all currently occupied cells (origins) as taken from the start. + # Each agent may still target its own origin; we handle that exception + # when filtering candidate pools. + taken = origins.select( + [ + pl.col("dim_0").alias("dim_0_candidate"), + pl.col("dim_1").alias("dim_1_candidate"), + ] + ) + origins_for_filter = origins.rename( + {"dim_0": "dim_0_origin", "dim_1": "dim_1_origin"} + ) + + # Resolve in rounds: each unresolved agent proposes its current-ranked + # candidate; winners per-cell are selected at random and losers are + # promoted to their next choice. + while unresolved.height > 0: + # Using precomputed `rank` lets us select candidates with + # `rank >= current_rank` and avoid re-ranking after each round. + # Alternative: remove taken cells and re-sort remaining candidates + # by sugar/distance per round (heavier due to repeated sort/dedupe). + # candidate_pool columns (after join with unresolved): + # ┌──────────┬──────────────────┬──────────────────┬────────┬────────┬──────┬──────────────┐ + # │ agent_id ┆ dim_0_candidate ┆ dim_1_candidate ┆ sugar ┆ radius ┆ rank ┆ current_rank │ + # │ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 ┆ i64 ┆ i64 ┆ u32 ┆ i64 │ + # ╞══════════╪══════════════════╪══════════════════╪════════╪════════╪══════╪══════════════╡ + candidate_pool = choices.join(unresolved, on="agent_id") + candidate_pool = candidate_pool.filter( + pl.col("rank") >= pl.col("current_rank") + ) + candidate_pool = ( + candidate_pool.join(origins_for_filter, on="agent_id", how="left") + .join( + taken.with_columns(pl.lit(True).alias("is_taken")), + on=["dim_0_candidate", "dim_1_candidate"], + how="left", + ) + .filter( + pl.col("is_taken").is_null() + | ( + (pl.col("dim_0_candidate") == pl.col("dim_0_origin")) + & (pl.col("dim_1_candidate") == pl.col("dim_1_origin")) + ) + ) + .drop(["dim_0_origin", "dim_1_origin", "is_taken"]) + ) + + if candidate_pool.is_empty(): + # No available candidates — everyone falls back to origin. + # Note: this covers both agents with no visible cells left and + # the case where all remaining candidates are already taken. 
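+                # Because `taken` is seeded with every origin and grid capacity
+                # is 1, an agent that falls back to its own origin cannot collide
+                # with another agent's allocation: no other agent may claim that
+                # cell unless its owner has already moved away (see the
+                # `vacated` handling further down).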
+ # fallback columns: + # ┌──────────┬────────────┬────────────┬──────────────┐ + # │ agent_id ┆ dim_0 ┆ dim_1 ┆ current_rank │ + # │ --- ┆ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 ┆ i64 │ + # ╞══════════╪════════════╪════════════╪══════════════╡ + fallback = unresolved.join(origins, on="agent_id", how="left") + assigned = pl.concat( + [ + assigned, + fallback.select( + [ + "agent_id", + pl.col("dim_0").alias("dim_0_candidate"), + pl.col("dim_1").alias("dim_1_candidate"), + ] + ), + ], + how="vertical", + ) + break + + # best_candidates columns (per agent first choice): + # ┌──────────┬──────────────────┬──────────────────┬────────┬────────┬──────┬──────────────┐ + # │ agent_id ┆ dim_0_candidate ┆ dim_1_candidate ┆ sugar ┆ radius ┆ rank ┆ current_rank │ + # │ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │ + # │ u64 ┆ i64 ┆ i64 ┆ i64 ┆ i64 ┆ u32 ┆ i64 │ + # ╞══════════╪══════════════════╪══════════════════╪════════╪════════╪══════╪══════════════╡ + best_candidates = ( + candidate_pool.sort(["agent_id", "rank"]) + .group_by("agent_id", maintain_order=True) + .first() + ) + + # Agents that had no candidate this round fall back to origin. + # missing columns: + # ┌──────────┬──────────────┐ + # │ agent_id ┆ current_rank │ + # │ --- ┆ --- │ + # │ u64 ┆ i64 │ + # ╞══════════╪══════════════╡ + missing = unresolved.join( + best_candidates.select("agent_id"), on="agent_id", how="anti" + ) + if not missing.is_empty(): + # fallback (missing) columns match fallback table above. + fallback = missing.join(origins, on="agent_id", how="left") + assigned = pl.concat( + [ + assigned, + fallback.select( + [ + "agent_id", + pl.col("dim_0").alias("dim_0_candidate"), + pl.col("dim_1").alias("dim_1_candidate"), + ] + ), + ], + how="vertical", + ) + unresolved = unresolved.join( + missing.select("agent_id"), on="agent_id", how="anti" + ) + best_candidates = best_candidates.join( + missing.select("agent_id"), on="agent_id", how="anti" + ) + if unresolved.is_empty() or best_candidates.is_empty(): + continue + + # Add a small random lottery to break ties deterministically for + # each candidate set. + lottery = pl.Series("lottery", self.random.random(best_candidates.height)) + best_candidates = best_candidates.with_columns(lottery) + + # winners columns: + # ┌──────────┬──────────────────┬──────────────────┬────────┬────────┬──────┬──────────────┬─────────┐ + # │ agent_id ┆ dim_0_candidate ┆ dim_1_candidate ┆ sugar ┆ radius ┆ rank ┆ current_rank │ lottery │ + # │ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ f64 │ + # ╞══════════╪══════════════════╪══════════════════╪════════╪════════╪══════╪══════════════╪═════════╡ + winners = ( + best_candidates.sort( + ["dim_0_candidate", "dim_1_candidate", "lottery"], + ) + .group_by(["dim_0_candidate", "dim_1_candidate"], maintain_order=True) + .first() + ) + + assigned = pl.concat( + [ + assigned, + winners.select( + [ + "agent_id", + pl.col("dim_0_candidate"), + pl.col("dim_1_candidate"), + ] + ), + ], + how="vertical", + ) + taken = pl.concat( + [ + taken, + winners.select(["dim_0_candidate", "dim_1_candidate"]), + ], + how="vertical", + ) + + # Origins of agents that move away become available to others in + # subsequent rounds. Keep origins for agents that stayed put. 
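+            # Hypothetical example: if agent 7 wins cell (4, 5) after starting
+            # at (4, 4), then (4, 4) is dropped from `taken` below so that a
+            # later round can allocate it to a promoted loser.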
+ vacated = ( + winners.join(origins_for_filter, on="agent_id", how="left") + .filter( + (pl.col("dim_0_candidate") != pl.col("dim_0_origin")) + | (pl.col("dim_1_candidate") != pl.col("dim_1_origin")) + ) + .select( + pl.col("dim_0_origin").alias("dim_0_candidate"), + pl.col("dim_1_origin").alias("dim_1_candidate"), + ) + ) + if not vacated.is_empty(): + taken = taken.join( + vacated, + on=["dim_0_candidate", "dim_1_candidate"], + how="anti", + ) + + winner_ids = winners.select("agent_id") + unresolved = unresolved.join(winner_ids, on="agent_id", how="anti") + if unresolved.is_empty(): + break + + # loser candidates columns mirror best_candidates (minus winners). + losers = best_candidates.join(winner_ids, on="agent_id", how="anti") + if losers.is_empty(): + continue + + # loser_updates columns (after select): + # ┌──────────┬───────────┐ + # │ agent_id ┆ next_rank │ + # │ --- ┆ --- │ + # │ u64 ┆ i64 │ + # ╞══════════╪═══════════╡ + loser_updates = ( + losers.select( + "agent_id", + (pl.col("rank") + 1).cast(pl.Int64).alias("next_rank"), + ) + .join(max_rank, on="agent_id", how="left") + .with_columns( + pl.min_horizontal(pl.col("next_rank"), pl.col("max_rank")).alias( + "next_rank" + ) + ) + .select(["agent_id", "next_rank"]) + ) + + # Promote losers' current_rank (if any) and continue. + # unresolved (updated) retains columns agent_id/current_rank. + unresolved = ( + unresolved.join(loser_updates, on="agent_id", how="left") + .with_columns( + pl.when(pl.col("next_rank").is_not_null()) + .then(pl.col("next_rank")) + .otherwise(pl.col("current_rank")) + .alias("current_rank") + ) + .drop("next_rank") + ) + + return assigned + + +__all__ = [ + "AntsBase", + "AntsParallel", +] diff --git a/examples/sugarscape_ig/backend_frames/model.py b/examples/sugarscape_ig/backend_frames/model.py new file mode 100644 index 00000000..c51122fe --- /dev/null +++ b/examples/sugarscape_ig/backend_frames/model.py @@ -0,0 +1,497 @@ +"""Mesa-frames implementation of Sugarscape IG with Typer CLI. + +This mirrors the advanced tutorial in docs/general/tutorials/3_advanced_tutorial.py +and exposes a simple CLI to run the parallel update variant, save CSVs, and plot +the Gini trajectory. +""" + +from __future__ import annotations + +from datetime import datetime, timezone +import os +from pathlib import Path +from typing import Annotated +from time import perf_counter + +import numpy as np +import polars as pl +import typer + +from mesa_frames import DataCollector, Grid, Model +from examples.utils import FramesSimulationResult +from examples.plotting import plot_model_metrics + +from examples.sugarscape_ig.backend_frames.agents import AntsBase, AntsParallel + + +# Model-level reporters + + +def gini(model: Model) -> float: + """Compute the Gini coefficient of agent sugar holdings. + + The function reads the primary agent set from ``model.sets[0]`` and + computes the population Gini coefficient on the ``sugar`` column. The + implementation is robust to empty sets and zero-total sugar. + + Parameters + ---------- + model : Model + The simulation model that contains agent sets. The primary agent set + is expected to be at ``model.sets[0]`` and to expose a Polars DataFrame + under ``.df`` with a ``sugar`` column. + + Returns + ------- + float + Gini coefficient in the range [0, 1] if defined, ``0.0`` when the + total sugar is zero, and ``nan`` when the agent set is empty or too + small to measure. 
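+
+    Notes
+    -----
+    A minimal sketch of the computation, assuming ``x`` holds the sugar values
+    sorted ascending and ``n = len(x)``:
+
+    ``G = 2 * sum(i * x[i - 1] for i in range(1, n + 1)) / (n * sum(x)) - (n + 1) / n``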
+ """ + if len(model.sets) == 0: + return float("nan") + + primary_set = model.sets[0] + if len(primary_set) == 0: + return float("nan") + + sugar = primary_set.df["sugar"].to_numpy().astype(np.float64) + sorted_vals = np.sort(sugar) + n = sorted_vals.size + if n == 0: + return float("nan") + cumulative = np.cumsum(sorted_vals) + total = cumulative[-1] + if total == 0: + return 0.0 + index = np.arange(1, n + 1, dtype=np.float64) + return float((2.0 * np.dot(index, sorted_vals) / (n * total)) - (n + 1) / n) + + +def corr_sugar_metabolism(model: Model) -> float: + """Pearson correlation between agent sugar and metabolism. + + This reporter extracts the ``sugar`` and ``metabolism`` columns from the + primary agent set and returns their Pearson correlation coefficient. When + the agent set is empty or contains insufficient variation the function + returns ``nan``. + + Parameters + ---------- + model : Model + The simulation model that contains agent sets. The primary agent set + is expected to be at ``model.sets[0]`` and provide a Polars DataFrame + with ``sugar`` and ``metabolism`` columns. + + Returns + ------- + float + Pearson correlation coefficient between sugar and metabolism, or + ``nan`` when the correlation is undefined (empty set or constant + values). + """ + if len(model.sets) == 0: + return float("nan") + + primary_set = model.sets[0] + if len(primary_set) == 0: + return float("nan") + + agent_df = primary_set.df + sugar = agent_df["sugar"].to_numpy().astype(np.float64) + metabolism = agent_df["metabolism"].to_numpy().astype(np.float64) + return _safe_corr(sugar, metabolism) + + +def corr_sugar_vision(model: Model) -> float: + """Pearson correlation between agent sugar and vision. + + Extracts the ``sugar`` and ``vision`` columns from the primary agent set + and returns their Pearson correlation coefficient. If the reporter cannot + compute a meaningful correlation (for example, when the agent set is + empty or values are constant) it returns ``nan``. + + Parameters + ---------- + model : Model + The simulation model that contains agent sets. The primary agent set + is expected to be at ``model.sets[0]`` and provide a Polars DataFrame + with ``sugar`` and ``vision`` columns. + + Returns + ------- + float + Pearson correlation coefficient between sugar and vision, or ``nan`` + when the correlation is undefined. + """ + if len(model.sets) == 0: + return float("nan") + + primary_set = model.sets[0] + if len(primary_set) == 0: + return float("nan") + + agent_df = primary_set.df + sugar = agent_df["sugar"].to_numpy().astype(np.float64) + vision = agent_df["vision"].to_numpy().astype(np.float64) + return _safe_corr(sugar, vision) + + +def _safe_corr(x: np.ndarray, y: np.ndarray) -> float: + """Safely compute Pearson correlation between two 1-D arrays. + + This helper guards against degenerate inputs (too few observations or + constant arrays) which would make the Pearson correlation undefined or + numerically unstable. When a valid correlation can be computed the + function returns a Python float. + + Parameters + ---------- + x : np.ndarray + One-dimensional numeric array containing the first variable to + correlate. + y : np.ndarray + One-dimensional numeric array containing the second variable to + correlate. + + Returns + ------- + float + Pearson correlation coefficient as a Python float, or ``nan`` if the + correlation is undefined (fewer than 2 observations or constant + inputs). 
+ """ + if x.size < 2 or y.size < 2: + return float("nan") + if np.allclose(x, x[0]) or np.allclose(y, y[0]): + return float("nan") + return float(np.corrcoef(x, y)[0, 1]) + + +class Sugarscape(Model): + """Minimal Sugarscape model used throughout the tutorial. + + This class wires together a grid that stores ``sugar`` per cell, an + agent set implementation (passed in as ``agent_type``), and a + data collector that records model- and agent-level statistics. + + The model's responsibilities are to: + - create the sugar landscape (cells with current and maximum sugar) + - create and place agents on the grid + - advance the sugar regrowth rule each step + - run the model for a fixed number of steps and collect data + + Parameters + ---------- + agent_type : type[AntsBase] + The :class:`AgentSet` subclass implementing the movement rules + (sequential, numba-accelerated, or parallel). + n_agents : int + Number of agents to create and place on the grid. + width : int + Grid width (number of columns). + height : int + Grid height (number of rows). + max_sugar : int, optional + Upper bound for the randomly initialised sugar values on the grid, + by default 4. + seed : int | None, optional + RNG seed to make runs reproducible across variants, by default None. + results_dir : Path | None, optional + Optional directory where CSV/plot outputs will be written. If ``None`` + the model runs without persisting CSVs to disk (in-memory storage). + + Notes + ----- + The grid uses a von Neumann neighbourhood and capacity 1 (at most one + agent per cell). Both the sugar landscape and initial agent traits are + drawn from ``self.random`` so different movement variants can be + instantiated with identical initial conditions by passing the same seed. + """ + + def __init__( + self, + agent_type: type[AntsBase], + n_agents: int, + *, + width: int, + height: int, + max_sugar: int = 4, + seed: int | None = None, + results_dir: Path | None = None, + ) -> None: + if n_agents > width * height: + raise ValueError( + "Cannot place more agents than grid cells when capacity is 1." + ) + super().__init__(seed) + + # 1. Let's create the sugar grid and set up the space + + sugar_grid_df = self._generate_sugar_grid(width, height, max_sugar) + self.space = Grid( + self, [width, height], neighborhood_type="von_neumann", capacity=1 + ) + self.space.set_cells(sugar_grid_df) + self._max_sugar = sugar_grid_df.select(["dim_0", "dim_1", "max_sugar"]) + + # 2. Now we create the agents and place them on the grid + + agent_frame = self._generate_agent_frame(n_agents) + main_set = agent_type(self, agent_frame) + self.sets += main_set + self.space.place_to_empty(self.sets) + + # 3. Finally we set up the data collector + # Benchmarks may run without providing a results_dir; in that case avoid forcing + # a CSV storage backend (which requires a storage_uri) and keep data in memory. 
+ if results_dir is None: + storage = "memory" + storage_uri = None + else: + storage = "csv" + storage_uri = str(results_dir) + self.datacollector = DataCollector( + model=self, + model_reporters={ + "mean_sugar": lambda m: 0.0 + if len(m.sets[0]) == 0 + else float(m.sets[0].df["sugar"].mean()), + "total_sugar": lambda m: float(m.sets[0].df["sugar"].sum()) + if len(m.sets[0]) + else 0.0, + "agents_alive": lambda m: float(len(m.sets[0])) if len(m.sets) else 0.0, + "gini": gini, + "corr_sugar_metabolism": corr_sugar_metabolism, + "corr_sugar_vision": corr_sugar_vision, + }, + agent_reporters={ + "sugar": "sugar", + "metabolism": "metabolism", + "vision": "vision", + }, + storage=storage, + storage_uri=storage_uri, + ) + self.datacollector.collect() + + def _generate_sugar_grid( + self, width: int, height: int, max_sugar: int + ) -> pl.DataFrame: + """Generate a random sugar grid. + + Parameters + ---------- + width : int + Grid width (number of columns). + height : int + Grid height (number of rows). + max_sugar : int + Maximum sugar value (inclusive) for each cell. + + Returns + ------- + pl.DataFrame + DataFrame with columns ``dim_0``, ``dim_1``, ``sugar`` (current + amount) and ``max_sugar`` (regrowth target). + """ + sugar_vals = self.random.integers( + 0, max_sugar + 1, size=(width, height), dtype=np.int64 + ) + dim_0 = pl.Series("dim_0", pl.arange(width, eager=True)).to_frame() + dim_1 = pl.Series("dim_1", pl.arange(height, eager=True)).to_frame() + return dim_0.join(dim_1, how="cross").with_columns( + sugar=sugar_vals.flatten(), max_sugar=sugar_vals.flatten() + ) + + def _generate_agent_frame(self, n_agents: int) -> pl.DataFrame: + """Create the initial agent frame populated with agent traits. + + Parameters + ---------- + n_agents : int + Number of agents to create. + + Returns + ------- + pl.DataFrame + DataFrame with columns ``sugar``, ``metabolism`` and ``vision`` + (integer values) for each agent. + """ + rng = self.random + return pl.DataFrame( + { + "sugar": rng.integers(6, 25, size=n_agents, dtype=np.int64), + "metabolism": rng.integers(2, 5, size=n_agents, dtype=np.int64), + "vision": rng.integers(1, 6, size=n_agents, dtype=np.int64), + } + ) + + def step(self) -> None: + """Advance the model by one step. + + Notes + ----- + The per-step ordering is important and this tutorial implements the + classic Sugarscape "instant growback": agents move and eat first, + and then empty cells are refilled immediately (move -> eat -> regrow + -> collect). + """ + if len(self.sets[0]) == 0: + self.running = False + return + self.sets[0].step() + self._advance_sugar_field() + self.datacollector.collect() + if len(self.sets[0]) == 0: + self.running = False + + def run(self, steps: int) -> None: + """Run the model for a fixed number of steps. + + Parameters + ---------- + steps : int + Maximum number of steps to run. The model may terminate earlier if + ``self.running`` is set to ``False`` (for example, when all agents + have died). + """ + for _ in range(steps): + if not self.running: + break + self.step() + + def _advance_sugar_field(self) -> None: + """Apply the instant-growback sugar regrowth rule. + + Empty cells (no agent present) are refilled to their ``max_sugar`` + value. Cells that are occupied are set to zero because agents harvest + the sugar when they eat. The method uses vectorised DataFrame joins + and writes to keep the operation efficient. 
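+        For example, a vacated cell with ``max_sugar = 3`` is restored to 3 in
+        the same step (instant growback), while the newly occupied cell drops
+        to 0.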
+ """ + empty_cells = self.space.empty_cells + if not empty_cells.is_empty(): + # Look up the maximum sugar for each empty cell and restore it. + refresh = empty_cells.join( + self._max_sugar, on=["dim_0", "dim_1"], how="left" + ) + self.space.set_cells(empty_cells, {"sugar": refresh["max_sugar"]}) + full_cells = self.space.full_cells + if not full_cells.is_empty(): + # Occupied cells have just been harvested; set their sugar to 0. + zeros = pl.Series(np.zeros(len(full_cells), dtype=np.int64)) + self.space.set_cells(full_cells, {"sugar": zeros}) + + +def simulate( + *, + agents: int, + steps: int, + width: int, + height: int, + max_sugar: int = 4, + seed: int | None = None, + results_dir: Path | None = None, +) -> FramesSimulationResult: + model = Sugarscape( + agent_type=AntsParallel, + n_agents=agents, + width=width, + height=height, + max_sugar=max_sugar, + seed=seed, + results_dir=results_dir, + ) + model.run(steps) + return FramesSimulationResult(datacollector=model.datacollector) + + +app = typer.Typer(add_completion=False) + + +@app.command() +def run( + agents: Annotated[int, typer.Option(help="Number of agents to simulate.")] = 400, + width: Annotated[int, typer.Option(help="Grid width (columns).")] = 40, + height: Annotated[int, typer.Option(help="Grid height (rows).")] = 40, + steps: Annotated[int, typer.Option(help="Number of model steps to run.")] = 60, + max_sugar: Annotated[int, typer.Option(help="Maximum sugar per cell.")] = 4, + seed: Annotated[int | None, typer.Option(help="Optional RNG seed.")] = None, + plot: Annotated[bool, typer.Option(help="Render Seaborn plots.")] = True, + save_results: Annotated[bool, typer.Option(help="Persist metrics as CSV.")] = True, + results_dir: Annotated[ + Path | None, + typer.Option( + help="Directory to write CSV results and plots into. If omitted a timestamped subdir under `results/` is used." + ), + ] = None, +) -> None: + typer.echo( + f"Running Sugarscape IG (mesa-frames, parallel) with {agents} agents on {width}x{height} for {steps} steps" + ) + runtime_typechecking = os.environ.get("MESA_FRAMES_RUNTIME_TYPECHECKING", "") + if runtime_typechecking and runtime_typechecking.lower() not in {"0", "false"}: + typer.secho( + "Warning: MESA_FRAMES_RUNTIME_TYPECHECKING is enabled; this run will be slower.", + fg=typer.colors.YELLOW, + ) + timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") + if results_dir is None: + results_dir = ( + Path(__file__).resolve().parent / "results" / timestamp + ).resolve() + results_dir.mkdir(parents=True, exist_ok=True) + + start_time = perf_counter() + result = simulate( + agents=agents, + steps=steps, + width=width, + height=height, + max_sugar=max_sugar, + seed=seed, + results_dir=results_dir, + ) + typer.echo(f"Simulation complete in {perf_counter() - start_time:.2f} seconds") + + model_metrics = result.datacollector.data["model"].drop(["seed", "batch"]) + typer.echo(f"Metrics in the final 5 steps: {model_metrics.tail(5)}") + + if save_results: + result.datacollector.flush() + typer.echo(f"Saved CSV results under {results_dir}") + + if plot: + # Create a subdirectory for per-metric plots under the timestamped + # results directory. For each column in the model metrics (except + # the step index) create a single-metric DataFrame and call the + # shared plotting helper to export light/dark PNG+SVG variants. + plots_dir = results_dir / "plots" + plots_dir.mkdir(parents=True, exist_ok=True) + + # Determine which columns to plot (preserve 'step' if present). 
+ value_cols = [c for c in model_metrics.columns if c != "step"] + for col in value_cols: + stem = f"{col}_{timestamp}" + single = ( + model_metrics.select(["step", col]) + if "step" in model_metrics.columns + else model_metrics.select([col]) + ) + plot_model_metrics( + single, + plots_dir, + stem, + title=f"Sugarscape IG — {col.capitalize()}", + subtitle=f"mesa-frames backend; seed={result.datacollector.seed}", + agents=agents, + steps=steps, + ) + + typer.echo(f"Saved plots under {plots_dir}") + + # Skip CSV-saved confirmation when results are not persisted + + +if __name__ == "__main__": + app() diff --git a/examples/sugarscape_ig/backend_mesa/__init__.py b/examples/sugarscape_ig/backend_mesa/__init__.py new file mode 100644 index 00000000..463099c0 --- /dev/null +++ b/examples/sugarscape_ig/backend_mesa/__init__.py @@ -0,0 +1 @@ +"""Mesa backend package for Sugarscape IG examples.""" diff --git a/examples/sugarscape_ig/backend_mesa/agents.py b/examples/sugarscape_ig/backend_mesa/agents.py new file mode 100644 index 00000000..b8626d20 --- /dev/null +++ b/examples/sugarscape_ig/backend_mesa/agents.py @@ -0,0 +1,81 @@ +"""Mesa agents for the Sugarscape IG example (sequential/asynchronous update). + +Implements the movement rule (sense along cardinal axes up to `vision`, choose +highest-sugar cell with tie-breakers by distance and coordinates). Eating, +starvation, and regrowth are orchestrated by the model to preserve the order +move -> eat -> regrow -> collect, mirroring the tutorial schedule. +""" + +from __future__ import annotations + +from typing import Tuple + +import mesa + + +class AntAgent(mesa.Agent): + """Sugarscape ant with sugar/metabolism/vision traits and movement.""" + + def __init__( + self, + model: mesa.Model, + *, + sugar: int, + metabolism: int, + vision: int, + ) -> None: + super().__init__(model) + self.sugar = int(sugar) + self.metabolism = int(metabolism) + self.vision = int(vision) + + # --- Movement helpers (sequential/asynchronous) --- + + def _visible_cells(self, origin: tuple[int, int]) -> list[tuple[int, int]]: + x0, y0 = origin + width, height = self.model.width, self.model.height + cells: list[tuple[int, int]] = [origin] + for step in range(1, self.vision + 1): + if x0 + step < width: + cells.append((x0 + step, y0)) + if x0 - step >= 0: + cells.append((x0 - step, y0)) + if y0 + step < height: + cells.append((x0, y0 + step)) + if y0 - step >= 0: + cells.append((x0, y0 - step)) + return cells + + def _choose_best_cell(self, origin: tuple[int, int]) -> tuple[int, int]: + # Highest sugar; tie-break by Manhattan distance from origin; then coords. + best_cell = origin + best_sugar = int(self.model.sugar_current[origin[0], origin[1]]) + best_distance = 0 + ox, oy = origin + for cx, cy in self._visible_cells(origin): + # Block occupied cells except the origin (own cell allowed as fallback). 
+ if (cx, cy) != origin and not self.model.grid.is_cell_empty((cx, cy)): + continue + sugar_here = int(self.model.sugar_current[cx, cy]) + distance = abs(cx - ox) + abs(cy - oy) + better = False + if sugar_here > best_sugar: + better = True + elif sugar_here == best_sugar: + if distance < best_distance: + better = True + elif distance == best_distance and (cx, cy) < best_cell: + better = True + if better: + best_cell = (cx, cy) + best_sugar = sugar_here + best_distance = distance + return best_cell + + def move(self) -> None: + best = self._choose_best_cell(self.pos) + if best != self.pos: + self.model.grid.move_agent(self, best) + + +__all__ = ["AntAgent"] diff --git a/examples/sugarscape_ig/backend_mesa/model.py b/examples/sugarscape_ig/backend_mesa/model.py new file mode 100644 index 00000000..5b7bbf90 --- /dev/null +++ b/examples/sugarscape_ig/backend_mesa/model.py @@ -0,0 +1,312 @@ +"""Mesa implementation of Sugarscape IG with Typer CLI (sequential update). + +Follows the same structure as the Boltzmann Mesa example: `simulate()` and a +`run` CLI command that saves CSV results and plots the Gini trajectory. The +model updates in the order move -> eat -> regrow -> collect, matching the +tutorial schedule. +""" + +from __future__ import annotations + +from datetime import datetime, timezone +from pathlib import Path +from typing import Annotated +from collections.abc import Iterable +from time import perf_counter + +import mesa +from mesa.datacollection import DataCollector +from mesa.space import SingleGrid +import numpy as np +import polars as pl +import typer + +from examples.utils import MesaSimulationResult +from examples.plotting import plot_model_metrics + +from examples.sugarscape_ig.backend_mesa.agents import AntAgent + + +def _safe_corr(x: np.ndarray, y: np.ndarray) -> float: + """Safely compute Pearson correlation between two 1-D arrays. + + Mirrors the Frames helper: returns nan for degenerate inputs. 
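+    Degenerate here means fewer than two observations or a constant array.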
+ """ + x = np.asarray(x, dtype=float) + y = np.asarray(y, dtype=float) + if x.size < 2 or y.size < 2: + return float("nan") + if np.allclose(x, x[0]) or np.allclose(y, y[0]): + return float("nan") + return float(np.corrcoef(x, y)[0, 1]) + + +def corr_sugar_metabolism(model: Sugarscape) -> float: + sugars = np.fromiter((a.sugar for a in model.agent_list), dtype=float) + mets = np.fromiter((a.metabolism for a in model.agent_list), dtype=float) + return _safe_corr(sugars, mets) + + +def corr_sugar_vision(model: Sugarscape) -> float: + sugars = np.fromiter((a.sugar for a in model.agent_list), dtype=float) + vision = np.fromiter((a.vision for a in model.agent_list), dtype=float) + return _safe_corr(sugars, vision) + + +def gini(values: Iterable[float]) -> float: + array = np.fromiter(values, dtype=float) + if array.size == 0: + return float("nan") + if np.allclose(array, 0.0): + return 0.0 + if np.allclose(array, array[0]): + return 0.0 + sorted_vals = np.sort(array) + n = sorted_vals.size + cumulative = np.cumsum(sorted_vals) + total = cumulative[-1] + if total == 0: + return 0.0 + index = np.arange(1, n + 1, dtype=float) + return float((2.0 * np.dot(index, sorted_vals) / (n * total)) - (n + 1) / n) + + +class Sugarscape(mesa.Model): + def __init__( + self, + agents: int, + *, + width: int, + height: int, + max_sugar: int = 4, + seed: int | None = None, + ) -> None: + super().__init__() + if seed is None: + seed = self.random.randint(0, np.iinfo(np.int32).max) + self.reset_randomizer(seed) + self.width = int(width) + self.height = int(height) + + # Sugar field (current and max) as 2D arrays shaped (width, height) + numpy_rng = np.random.default_rng(seed) + self.sugar_max = numpy_rng.integers( + 0, max_sugar + 1, size=(width, height), dtype=np.int64 + ) + self.sugar_current = self.sugar_max.copy() + + # Grid with capacity 1 per cell + self.grid = SingleGrid(width, height, torus=False) + + # Agents (Python list, manually shuffled/iterated for speed) + self.agent_list: list[AntAgent] = [] + # Place all agents on empty cells; also draw initial traits from model RNG + placed = 0 + while placed < agents: + x = int(self.random.randrange(0, width)) + y = int(self.random.randrange(0, height)) + if self.grid.is_cell_empty((x, y)): + a = AntAgent( + self, + sugar=int(self.random.randint(6, 25)), + metabolism=int(self.random.randint(2, 5)), + vision=int(self.random.randint(1, 6)), + ) + self.grid.place_agent(a, (x, y)) + self.agent_list.append(a) + placed += 1 + + # Model-level reporters mirroring the Frames implementation so CSVs + # are comparable across backends. 
+ self.datacollector = DataCollector( + model_reporters={ + "mean_sugar": lambda m: float(np.mean([a.sugar for a in m.agent_list])) + if m.agent_list + else 0.0, + "total_sugar": lambda m: float(sum(a.sugar for a in m.agent_list)) + if m.agent_list + else 0.0, + "agents_alive": lambda m: float(len(m.agent_list)), + "gini": lambda m: gini(a.sugar for a in m.agent_list), + "corr_sugar_metabolism": lambda m: corr_sugar_metabolism(m), + "corr_sugar_vision": lambda m: corr_sugar_vision(m), + "seed": lambda m: seed, + }, + agent_reporters={ + "traits": lambda a: { + "sugar": a.sugar, + "metabolism": a.metabolism, + "vision": a.vision, + } + }, + ) + self.datacollector.collect(self) + + # --- Scheduling --- + + def _harvest_and_survive(self) -> None: + survivors: list[AntAgent] = [] + for a in self.agent_list: + x, y = a.pos + a.sugar += int(self.sugar_current[x, y]) + a.sugar -= a.metabolism + # Harvested cells are emptied now; they will be refilled if empty. + self.sugar_current[x, y] = 0 + if a.sugar > 0: + survivors.append(a) + else: + # Remove dead agent from grid + self.grid.remove_agent(a) + self.agent_list = survivors + + def _regrow(self) -> None: + # Empty cells regrow to max; occupied cells set to 0 (already zeroed on harvest) + for x in range(self.width): + for y in range(self.height): + if self.grid.is_cell_empty((x, y)): + self.sugar_current[x, y] = self.sugar_max[x, y] + else: + self.sugar_current[x, y] = 0 + + def step(self) -> None: + # Randomise order, move sequentially, then eat/starve, regrow, collect + self.random.shuffle(self.agent_list) + for a in self.agent_list: + a.move() + self._harvest_and_survive() + self._regrow() + self.datacollector.collect(self) + if not self.agent_list: + self.running = False + + def run(self, steps: int) -> None: + for _ in range(steps): + if not getattr(self, "running", True): + break + self.step() + + +def simulate( + *, + agents: int, + steps: int, + width: int, + height: int, + max_sugar: int = 4, + seed: int | None = None, +) -> MesaSimulationResult: + model = Sugarscape( + agents, width=width, height=height, max_sugar=max_sugar, seed=seed + ) + model.run(steps) + return MesaSimulationResult(datacollector=model.datacollector) + + +app = typer.Typer(add_completion=False) + + +@app.command() +def run( + agents: Annotated[int, typer.Option(help="Number of agents to simulate.")] = 400, + width: Annotated[int, typer.Option(help="Grid width (columns).")] = 40, + height: Annotated[int, typer.Option(help="Grid height (rows).")] = 40, + steps: Annotated[int, typer.Option(help="Number of model steps to run.")] = 60, + max_sugar: Annotated[int, typer.Option(help="Maximum sugar per cell.")] = 4, + seed: Annotated[int | None, typer.Option(help="Optional RNG seed.")] = None, + plot: Annotated[bool, typer.Option(help="Render plots.")] = True, + save_results: Annotated[bool, typer.Option(help="Persist metrics as CSV.")] = True, + results_dir: Annotated[ + Path | None, + typer.Option( + help=( + "Directory to write CSV results and plots into. If omitted a " + "timestamped subdir under `results/` is used." 
+ ) + ), + ] = None, +) -> None: + typer.echo( + f"Running Sugarscape IG (mesa, sequential) with {agents} agents on {width}x{height} for {steps} steps" + ) + + # Resolve output folder + timestamp = datetime.now(timezone.utc).strftime("%Y%m%d_%H%M%S") + if results_dir is None: + results_dir = ( + Path(__file__).resolve().parent / "results" / timestamp + ).resolve() + results_dir.mkdir(parents=True, exist_ok=True) + + start_time = perf_counter() + result = simulate( + agents=agents, + steps=steps, + width=width, + height=height, + max_sugar=max_sugar, + seed=seed, + ) + typer.echo(f"Simulation completed in {perf_counter() - start_time:.3f} seconds") + dc = result.datacollector + + # Extract metrics using DataCollector API + model_pd = ( + dc.get_model_vars_dataframe().reset_index().rename(columns={"index": "step"}) + ) + # Keep the full model metrics (step + any model reporters) + + # Show tail for quick inspection (exclude seed column from display) + display_pd = ( + model_pd.drop(columns=["seed"]) if "seed" in model_pd.columns else model_pd + ) + typer.echo( + f"Metrics in the final 5 steps:\n{display_pd.tail(5).to_string(index=False)}" + ) + + # Save CSV (full model metrics) + if save_results: + csv_path = results_dir / "model.csv" + model_pd.to_csv(csv_path, index=False) + typer.echo(f"Saved CSV results under {results_dir}") + + # Plot per-metric similar to the backend_frames example: create a + # `plots/` subdirectory and generate one figure per model metric column + if plot and not model_pd.empty: + plots_dir = results_dir / "plots" + plots_dir.mkdir(parents=True, exist_ok=True) + + # Determine which columns to plot (preserve 'step' if present). + # Exclude 'seed' from plots so we don't create a chart for a constant + # model reporter; keep 'seed' in the CSV/dataframe for reproducibility. + value_cols = [c for c in model_pd.columns if c not in {"step", "seed"}] + for col in value_cols: + stem = f"{col}_{timestamp}" + single = ( + model_pd[["step", col]] + if "step" in model_pd.columns + else model_pd[[col]] + ) + # Convert the single-column pandas DataFrame to Polars for the + # shared plotting helper. + single_pl = pl.from_pandas(single) + # Omit seed from subtitle/plot metadata to avoid leaking a constant + # value into the figure (it remains in the saved CSV). If you want + # to include the seed in filenames or external metadata, prefer + # annotating the output folder or README instead. + plot_model_metrics( + single_pl, + plots_dir, + stem, + title=f"Sugarscape IG - {col.capitalize()}", + subtitle="mesa backend", + agents=agents, + steps=steps, + ) + + typer.echo(f"Saved plots under {plots_dir}") + + # Skip CSV-saved confirmation when results are not persisted + + +if __name__ == "__main__": + app() diff --git a/examples/utils.py b/examples/utils.py new file mode 100644 index 00000000..4d075dc4 --- /dev/null +++ b/examples/utils.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass +import mesa_frames +import mesa + + +@dataclass +class FramesSimulationResult: + """Container for example simulation outputs. + + The dataclass is intentionally permissive: some backends only provide + `metrics`, while others also return `agent_metrics`. + """ + + datacollector: mesa_frames.DataCollector + + +@dataclass +class MesaSimulationResult: + """Container for example simulation outputs. + + The dataclass is intentionally permissive: some backends only provide + `metrics`, while others also return `agent_metrics`. 
+ """ + + datacollector: mesa.DataCollector diff --git a/pyproject.toml b/pyproject.toml index 6db5f7da..f9f1e343 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,6 +65,7 @@ test = [ docs = [ { include-group = "typechecking" }, + "typer>=0.9.0", "mkdocs-material>=9.6.14", "mkdocs-jupyter>=0.25.1", "mkdocs-git-revision-date-localized-plugin>=1.4.7", diff --git a/tests/test_benchmarks_cli.py b/tests/test_benchmarks_cli.py new file mode 100644 index 00000000..d63de8a2 --- /dev/null +++ b/tests/test_benchmarks_cli.py @@ -0,0 +1,112 @@ +from __future__ import annotations + +from pathlib import Path + +import polars as pl +import pytest + +from benchmarks import cli + + +def _register_dummy_model(monkeypatch: pytest.MonkeyPatch) -> None: + # Small, fast no-op backends to avoid running real simulations in tests + b1 = cli.Backend(name="mesa", runner=lambda agents, steps, seed=None: None) + b2 = cli.Backend(name="frames", runner=lambda agents, steps, seed=None: None) + monkeypatch.setitem( + cli.MODELS, "dummy", cli.ModelConfig(name="dummy", backends=[b1, b2]) + ) + + +def _pick_timestamp_dir(base: Path) -> Path: + # After a run that writes, the base results dir will contain a single timestamp subdir + subs = [p for p in base.iterdir() if p.is_dir()] + assert len(subs) <= 1 + return subs[0] if subs else base + + +def test_summary_save_and_plot( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture +): + _register_dummy_model(monkeypatch) + cli.run( + models=["dummy"], + agents=[1], + steps=1, + repeats=1, + seed=1, + save=True, + plot=True, + results_dir=tmp_path, + ) + out = capsys.readouterr().out + assert "Unified benchmark outputs written:" in out + assert "CSVs under" in out and "plots under" in out + ts = _pick_timestamp_dir(tmp_path) + # CSV should be present + assert any(ts.glob("*_perf_*.csv")) + + +def test_summary_plot_only( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture +): + _register_dummy_model(monkeypatch) + cli.run( + models=["dummy"], + agents=[1], + steps=1, + repeats=1, + seed=1, + save=False, + plot=True, + results_dir=tmp_path, + ) + out = capsys.readouterr().out + assert "Unified benchmark outputs written:" in out + assert "plots under" in out and "CSVs under" not in out + ts = _pick_timestamp_dir(tmp_path) + # No CSVs should be present, but plots subdir should exist + assert not any(ts.glob("*_perf_*.csv")) + assert (ts / "plots").exists() + + +def test_summary_save_only( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture +): + _register_dummy_model(monkeypatch) + cli.run( + models=["dummy"], + agents=[1], + steps=1, + repeats=1, + seed=1, + save=True, + plot=False, + results_dir=tmp_path, + ) + out = capsys.readouterr().out + assert "Unified benchmark outputs written:" in out + assert "CSVs under" in out and "plots under" not in out + ts = _pick_timestamp_dir(tmp_path) + # CSV should be present, plots subdir should not exist + assert any(ts.glob("*_perf_*.csv")) + assert not (ts / "plots").exists() + + +def test_summary_neither( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch, capsys: pytest.CaptureFixture +): + _register_dummy_model(monkeypatch) + cli.run( + models=["dummy"], + agents=[1], + steps=1, + repeats=1, + seed=1, + save=False, + plot=False, + results_dir=tmp_path, + ) + out = capsys.readouterr().out + assert "Benchmark run completed (save=False, plot=False; no files written)." 
in out + # No directories created + assert not any(p for p in tmp_path.iterdir()) diff --git a/tests/test_sugarscape_mesa_cli.py b/tests/test_sugarscape_mesa_cli.py new file mode 100644 index 00000000..651143c7 --- /dev/null +++ b/tests/test_sugarscape_mesa_cli.py @@ -0,0 +1,26 @@ +from pathlib import Path + + +def test_saved_csv_echo_is_guarded_by_save_results_flag() -> None: + """Ensure the CSV-saved confirmation is printed only when results are saved. + + This is a light-weight static check that avoids importing heavy runtime + dependencies (e.g., `mesa`) in CI environments where they may be absent. + """ + path = Path("examples/sugarscape_ig/backend_mesa/model.py") + src = path.read_text() + + needle = 'typer.echo(f"Saved CSV results under {results_dir}")' + # Ensure the message exists in the file (we just moved it into the if block) + assert needle in src + + # For each occurrence, ensure it is preceded by an `if save_results:` within + # the previous 6 lines (simple heuristic that is robust enough for this fix). + lines = src.splitlines() + for idx, line in enumerate(lines): + if needle in line: + window_start = max(0, idx - 6) + window = "\n".join(lines[window_start:idx]) + assert "if save_results:" in window, ( + "Found CSV-saved confirmation not guarded by `if save_results:`" + ) diff --git a/uv.lock b/uv.lock index ee2f031c..94ae7a6e 100644 --- a/uv.lock +++ b/uv.lock @@ -995,25 +995,26 @@ wheels = [ [[package]] name = "llvmlite" -version = "0.44.0" +version = "0.46.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/89/6a/95a3d3610d5c75293d5dbbb2a76480d5d4eeba641557b69fe90af6c5b84e/llvmlite-0.44.0.tar.gz", hash = "sha256:07667d66a5d150abed9157ab6c0b9393c9356f229784a4385c02f99e94fc94d4", size = 171880, upload-time = "2025-01-20T11:14:41.342Z" } +sdist = { url = "https://files.pythonhosted.org/packages/74/cd/08ae687ba099c7e3d21fe2ea536500563ef1943c5105bf6ab4ee3829f68e/llvmlite-0.46.0.tar.gz", hash = "sha256:227c9fd6d09dce2783c18b754b7cd9d9b3b3515210c46acc2d3c5badd9870ceb", size = 193456, upload-time = "2025-12-08T18:15:36.295Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/b5/e2/86b245397052386595ad726f9742e5223d7aea999b18c518a50e96c3aca4/llvmlite-0.44.0-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:eed7d5f29136bda63b6d7804c279e2b72e08c952b7c5df61f45db408e0ee52f3", size = 28132305, upload-time = "2025-01-20T11:12:53.936Z" }, - { url = "https://files.pythonhosted.org/packages/ff/ec/506902dc6870249fbe2466d9cf66d531265d0f3a1157213c8f986250c033/llvmlite-0.44.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ace564d9fa44bb91eb6e6d8e7754977783c68e90a471ea7ce913bff30bd62427", size = 26201090, upload-time = "2025-01-20T11:12:59.847Z" }, - { url = "https://files.pythonhosted.org/packages/99/fe/d030f1849ebb1f394bb3f7adad5e729b634fb100515594aca25c354ffc62/llvmlite-0.44.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c5d22c3bfc842668168a786af4205ec8e3ad29fb1bc03fd11fd48460d0df64c1", size = 42361858, upload-time = "2025-01-20T11:13:07.623Z" }, - { url = "https://files.pythonhosted.org/packages/d7/7a/ce6174664b9077fc673d172e4c888cb0b128e707e306bc33fff8c2035f0d/llvmlite-0.44.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f01a394e9c9b7b1d4e63c327b096d10f6f0ed149ef53d38a09b3749dcf8c9610", size = 41184200, upload-time = "2025-01-20T11:13:20.058Z" }, - { url = 
"https://files.pythonhosted.org/packages/5f/c6/258801143975a6d09a373f2641237992496e15567b907a4d401839d671b8/llvmlite-0.44.0-cp311-cp311-win_amd64.whl", hash = "sha256:d8489634d43c20cd0ad71330dde1d5bc7b9966937a263ff1ec1cebb90dc50955", size = 30331193, upload-time = "2025-01-20T11:13:26.976Z" }, - { url = "https://files.pythonhosted.org/packages/15/86/e3c3195b92e6e492458f16d233e58a1a812aa2bfbef9bdd0fbafcec85c60/llvmlite-0.44.0-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:1d671a56acf725bf1b531d5ef76b86660a5ab8ef19bb6a46064a705c6ca80aad", size = 28132297, upload-time = "2025-01-20T11:13:32.57Z" }, - { url = "https://files.pythonhosted.org/packages/d6/53/373b6b8be67b9221d12b24125fd0ec56b1078b660eeae266ec388a6ac9a0/llvmlite-0.44.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f79a728e0435493611c9f405168682bb75ffd1fbe6fc360733b850c80a026db", size = 26201105, upload-time = "2025-01-20T11:13:38.744Z" }, - { url = "https://files.pythonhosted.org/packages/cb/da/8341fd3056419441286c8e26bf436923021005ece0bff5f41906476ae514/llvmlite-0.44.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0143a5ef336da14deaa8ec26c5449ad5b6a2b564df82fcef4be040b9cacfea9", size = 42361901, upload-time = "2025-01-20T11:13:46.711Z" }, - { url = "https://files.pythonhosted.org/packages/53/ad/d79349dc07b8a395a99153d7ce8b01d6fcdc9f8231355a5df55ded649b61/llvmlite-0.44.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d752f89e31b66db6f8da06df8b39f9b91e78c5feea1bf9e8c1fba1d1c24c065d", size = 41184247, upload-time = "2025-01-20T11:13:56.159Z" }, - { url = "https://files.pythonhosted.org/packages/e2/3b/a9a17366af80127bd09decbe2a54d8974b6d8b274b39bf47fbaedeec6307/llvmlite-0.44.0-cp312-cp312-win_amd64.whl", hash = "sha256:eae7e2d4ca8f88f89d315b48c6b741dcb925d6a1042da694aa16ab3dd4cbd3a1", size = 30332380, upload-time = "2025-01-20T11:14:02.442Z" }, - { url = "https://files.pythonhosted.org/packages/89/24/4c0ca705a717514c2092b18476e7a12c74d34d875e05e4d742618ebbf449/llvmlite-0.44.0-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:319bddd44e5f71ae2689859b7203080716448a3cd1128fb144fe5c055219d516", size = 28132306, upload-time = "2025-01-20T11:14:09.035Z" }, - { url = "https://files.pythonhosted.org/packages/01/cf/1dd5a60ba6aee7122ab9243fd614abcf22f36b0437cbbe1ccf1e3391461c/llvmlite-0.44.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c58867118bad04a0bb22a2e0068c693719658105e40009ffe95c7000fcde88e", size = 26201090, upload-time = "2025-01-20T11:14:15.401Z" }, - { url = "https://files.pythonhosted.org/packages/d2/1b/656f5a357de7135a3777bd735cc7c9b8f23b4d37465505bd0eaf4be9befe/llvmlite-0.44.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46224058b13c96af1365290bdfebe9a6264ae62fb79b2b55693deed11657a8bf", size = 42361904, upload-time = "2025-01-20T11:14:22.949Z" }, - { url = "https://files.pythonhosted.org/packages/d8/e1/12c5f20cb9168fb3464a34310411d5ad86e4163c8ff2d14a2b57e5cc6bac/llvmlite-0.44.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:aa0097052c32bf721a4efc03bd109d335dfa57d9bffb3d4c24cc680711b8b4fc", size = 41184245, upload-time = "2025-01-20T11:14:31.731Z" }, - { url = "https://files.pythonhosted.org/packages/d0/81/e66fc86539293282fd9cb7c9417438e897f369e79ffb62e1ae5e5154d4dd/llvmlite-0.44.0-cp313-cp313-win_amd64.whl", hash = "sha256:2fb7c4f2fb86cbae6dca3db9ab203eeea0e22d73b99bc2341cdf9de93612e930", size = 30331193, upload-time = "2025-01-20T11:14:38.578Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/a1/2ad4b2367915faeebe8447f0a057861f646dbf5fbbb3561db42c65659cf3/llvmlite-0.46.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:82f3d39b16f19aa1a56d5fe625883a6ab600d5cc9ea8906cca70ce94cabba067", size = 37232766, upload-time = "2025-12-08T18:14:48.836Z" }, + { url = "https://files.pythonhosted.org/packages/12/b5/99cf8772fdd846c07da4fd70f07812a3c8fd17ea2409522c946bb0f2b277/llvmlite-0.46.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a3df43900119803bbc52720e758c76f316a9a0f34612a886862dfe0a5591a17e", size = 56275175, upload-time = "2025-12-08T18:14:51.604Z" }, + { url = "https://files.pythonhosted.org/packages/38/f2/ed806f9c003563732da156139c45d970ee435bd0bfa5ed8de87ba972b452/llvmlite-0.46.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de183fefc8022d21b0aa37fc3e90410bc3524aed8617f0ff76732fc6c3af5361", size = 55128630, upload-time = "2025-12-08T18:14:55.107Z" }, + { url = "https://files.pythonhosted.org/packages/19/0c/8f5a37a65fc9b7b17408508145edd5f86263ad69c19d3574e818f533a0eb/llvmlite-0.46.0-cp311-cp311-win_amd64.whl", hash = "sha256:e8b10bc585c58bdffec9e0c309bb7d51be1f2f15e169a4b4d42f2389e431eb93", size = 38138652, upload-time = "2025-12-08T18:14:58.171Z" }, + { url = "https://files.pythonhosted.org/packages/2b/f8/4db016a5e547d4e054ff2f3b99203d63a497465f81ab78ec8eb2ff7b2304/llvmlite-0.46.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b9588ad4c63b4f0175a3984b85494f0c927c6b001e3a246a3a7fb3920d9a137", size = 37232767, upload-time = "2025-12-08T18:15:00.737Z" }, + { url = "https://files.pythonhosted.org/packages/aa/85/4890a7c14b4fa54400945cb52ac3cd88545bbdb973c440f98ca41591cdc5/llvmlite-0.46.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3535bd2bb6a2d7ae4012681ac228e5132cdb75fefb1bcb24e33f2f3e0c865ed4", size = 56275176, upload-time = "2025-12-08T18:15:03.936Z" }, + { url = "https://files.pythonhosted.org/packages/6a/07/3d31d39c1a1a08cd5337e78299fca77e6aebc07c059fbd0033e3edfab45c/llvmlite-0.46.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4cbfd366e60ff87ea6cc62f50bc4cd800ebb13ed4c149466f50cf2163a473d1e", size = 55128630, upload-time = "2025-12-08T18:15:07.196Z" }, + { url = "https://files.pythonhosted.org/packages/2a/6b/d139535d7590a1bba1ceb68751bef22fadaa5b815bbdf0e858e3875726b2/llvmlite-0.46.0-cp312-cp312-win_amd64.whl", hash = "sha256:398b39db462c39563a97b912d4f2866cd37cba60537975a09679b28fbbc0fb38", size = 38138940, upload-time = "2025-12-08T18:15:10.162Z" }, + { url = "https://files.pythonhosted.org/packages/e6/ff/3eba7eb0aed4b6fca37125387cd417e8c458e750621fce56d2c541f67fa8/llvmlite-0.46.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:30b60892d034bc560e0ec6654737aaa74e5ca327bd8114d82136aa071d611172", size = 37232767, upload-time = "2025-12-08T18:15:13.22Z" }, + { url = "https://files.pythonhosted.org/packages/0e/54/737755c0a91558364b9200702c3c9c15d70ed63f9b98a2c32f1c2aa1f3ba/llvmlite-0.46.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6cc19b051753368a9c9f31dc041299059ee91aceec81bd57b0e385e5d5bf1a54", size = 56275176, upload-time = "2025-12-08T18:15:16.339Z" }, + { url = "https://files.pythonhosted.org/packages/e6/91/14f32e1d70905c1c0aa4e6609ab5d705c3183116ca02ac6df2091868413a/llvmlite-0.46.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bca185892908f9ede48c0acd547fe4dc1bafefb8a4967d47db6cf664f9332d12", size = 55128629, upload-time = 
"2025-12-08T18:15:19.493Z" }, + { url = "https://files.pythonhosted.org/packages/4a/a7/d526ae86708cea531935ae777b6dbcabe7db52718e6401e0fb9c5edea80e/llvmlite-0.46.0-cp313-cp313-win_amd64.whl", hash = "sha256:67438fd30e12349ebb054d86a5a1a57fd5e87d264d2451bcfafbbbaa25b82a35", size = 38138941, upload-time = "2025-12-08T18:15:22.536Z" }, + { url = "https://files.pythonhosted.org/packages/95/ae/af0ffb724814cc2ea64445acad05f71cff5f799bb7efb22e47ee99340dbc/llvmlite-0.46.0-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:d252edfb9f4ac1fcf20652258e3f102b26b03eef738dc8a6ffdab7d7d341d547", size = 37232768, upload-time = "2025-12-08T18:15:25.055Z" }, + { url = "https://files.pythonhosted.org/packages/c9/19/5018e5352019be753b7b07f7759cdabb69ca5779fea2494be8839270df4c/llvmlite-0.46.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:379fdd1c59badeff8982cb47e4694a6143bec3bb49aa10a466e095410522064d", size = 56275173, upload-time = "2025-12-08T18:15:28.109Z" }, + { url = "https://files.pythonhosted.org/packages/9f/c9/d57877759d707e84c082163c543853245f91b70c804115a5010532890f18/llvmlite-0.46.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2e8cbfff7f6db0fa2c771ad24154e2a7e457c2444d7673e6de06b8b698c3b269", size = 55128628, upload-time = "2025-12-08T18:15:31.098Z" }, + { url = "https://files.pythonhosted.org/packages/30/a8/e61a8c2b3cc7a597073d9cde1fcbb567e9d827f1db30c93cf80422eac70d/llvmlite-0.46.0-cp314-cp314-win_amd64.whl", hash = "sha256:7821eda3ec1f18050f981819756631d60b6d7ab1a6cf806d9efefbe3f4082d61", size = 39153056, upload-time = "2025-12-08T18:15:33.938Z" }, ] [[package]] @@ -1255,6 +1256,7 @@ dev = [ { name = "sphinx-copybutton" }, { name = "sphinx-design" }, { name = "sphinx-rtd-theme" }, + { name = "typer" }, ] docs = [ { name = "autodocsumm" }, @@ -1274,6 +1276,7 @@ docs = [ { name = "sphinx-copybutton" }, { name = "sphinx-design" }, { name = "sphinx-rtd-theme" }, + { name = "typer" }, ] test = [ { name = "beartype" }, @@ -1289,7 +1292,7 @@ requires-dist = [ { name = "boto3", specifier = ">=1.35.91" }, { name = "numpy", specifier = ">=2.0.2" }, { name = "polars", specifier = ">=1.30.0" }, - { name = "psycopg2-binary", specifier = "==2.9.10" }, + { name = "psycopg2-binary", specifier = ">=2.9.10" }, { name = "pyarrow", specifier = ">=20.0.0" }, { name = "typing-extensions", specifier = ">=4.15.0" }, ] @@ -1304,7 +1307,7 @@ dev = [ { name = "mkdocs-jupyter", specifier = ">=0.25.1" }, { name = "mkdocs-material", specifier = ">=9.6.14" }, { name = "mkdocs-minify-plugin", specifier = ">=0.8.0" }, - { name = "numba", specifier = ">=0.60.0" }, + { name = "numba", specifier = ">=0.62" }, { name = "numpydoc", specifier = ">=1.8.0" }, { name = "perfplot", specifier = ">=0.10.2" }, { name = "pre-commit", specifier = ">=4.2.0" }, @@ -1318,6 +1321,7 @@ dev = [ { name = "sphinx-copybutton", specifier = ">=0.5.2" }, { name = "sphinx-design", specifier = ">=0.6.1" }, { name = "sphinx-rtd-theme", specifier = ">=3.0.2" }, + { name = "typer", specifier = ">=0.9.0" }, ] docs = [ { name = "autodocsumm", specifier = ">=0.2.14" }, @@ -1337,6 +1341,7 @@ docs = [ { name = "sphinx-copybutton", specifier = ">=0.5.2" }, { name = "sphinx-design", specifier = ">=0.6.1" }, { name = "sphinx-rtd-theme", specifier = ">=3.0.2" }, + { name = "typer", specifier = ">=0.9.0" }, ] test = [ { name = "beartype", specifier = ">=0.21.0" }, @@ -1559,29 +1564,30 @@ wheels = [ [[package]] name = "numba" -version = "0.61.2" +version = "0.63.1" source = { registry = 
"https://pypi.org/simple" } dependencies = [ { name = "llvmlite" }, { name = "numpy" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/a0/e21f57604304aa03ebb8e098429222722ad99176a4f979d34af1d1ee80da/numba-0.61.2.tar.gz", hash = "sha256:8750ee147940a6637b80ecf7f95062185ad8726c8c28a2295b8ec1160a196f7d", size = 2820615, upload-time = "2025-04-09T02:58:07.659Z" } +sdist = { url = "https://files.pythonhosted.org/packages/dc/60/0145d479b2209bd8fdae5f44201eceb8ce5a23e0ed54c71f57db24618665/numba-0.63.1.tar.gz", hash = "sha256:b320aa675d0e3b17b40364935ea52a7b1c670c9037c39cf92c49502a75902f4b", size = 2761666, upload-time = "2025-12-10T02:57:39.002Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/3f/97/c99d1056aed767503c228f7099dc11c402906b42a4757fec2819329abb98/numba-0.61.2-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:efd3db391df53aaa5cfbee189b6c910a5b471488749fd6606c3f33fc984c2ae2", size = 2775825, upload-time = "2025-04-09T02:57:43.442Z" }, - { url = "https://files.pythonhosted.org/packages/95/9e/63c549f37136e892f006260c3e2613d09d5120672378191f2dc387ba65a2/numba-0.61.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:49c980e4171948ffebf6b9a2520ea81feed113c1f4890747ba7f59e74be84b1b", size = 2778695, upload-time = "2025-04-09T02:57:44.968Z" }, - { url = "https://files.pythonhosted.org/packages/97/c8/8740616c8436c86c1b9a62e72cb891177d2c34c2d24ddcde4c390371bf4c/numba-0.61.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3945615cd73c2c7eba2a85ccc9c1730c21cd3958bfcf5a44302abae0fb07bb60", size = 3829227, upload-time = "2025-04-09T02:57:46.63Z" }, - { url = "https://files.pythonhosted.org/packages/fc/06/66e99ae06507c31d15ff3ecd1f108f2f59e18b6e08662cd5f8a5853fbd18/numba-0.61.2-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbfdf4eca202cebade0b7d43896978e146f39398909a42941c9303f82f403a18", size = 3523422, upload-time = "2025-04-09T02:57:48.222Z" }, - { url = "https://files.pythonhosted.org/packages/0f/a4/2b309a6a9f6d4d8cfba583401c7c2f9ff887adb5d54d8e2e130274c0973f/numba-0.61.2-cp311-cp311-win_amd64.whl", hash = "sha256:76bcec9f46259cedf888041b9886e257ae101c6268261b19fda8cfbc52bec9d1", size = 2831505, upload-time = "2025-04-09T02:57:50.108Z" }, - { url = "https://files.pythonhosted.org/packages/b4/a0/c6b7b9c615cfa3b98c4c63f4316e3f6b3bbe2387740277006551784218cd/numba-0.61.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:34fba9406078bac7ab052efbf0d13939426c753ad72946baaa5bf9ae0ebb8dd2", size = 2776626, upload-time = "2025-04-09T02:57:51.857Z" }, - { url = "https://files.pythonhosted.org/packages/92/4a/fe4e3c2ecad72d88f5f8cd04e7f7cff49e718398a2fac02d2947480a00ca/numba-0.61.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4ddce10009bc097b080fc96876d14c051cc0c7679e99de3e0af59014dab7dfe8", size = 2779287, upload-time = "2025-04-09T02:57:53.658Z" }, - { url = "https://files.pythonhosted.org/packages/9a/2d/e518df036feab381c23a624dac47f8445ac55686ec7f11083655eb707da3/numba-0.61.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5b1bb509d01f23d70325d3a5a0e237cbc9544dd50e50588bc581ba860c213546", size = 3885928, upload-time = "2025-04-09T02:57:55.206Z" }, - { url = "https://files.pythonhosted.org/packages/10/0f/23cced68ead67b75d77cfcca3df4991d1855c897ee0ff3fe25a56ed82108/numba-0.61.2-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:48a53a3de8f8793526cbe330f2a39fe9a6638efcbf11bd63f3d2f9757ae345cd", size = 3577115, upload-time = "2025-04-09T02:57:56.818Z" }, - { url = 
"https://files.pythonhosted.org/packages/68/1d/ddb3e704c5a8fb90142bf9dc195c27db02a08a99f037395503bfbc1d14b3/numba-0.61.2-cp312-cp312-win_amd64.whl", hash = "sha256:97cf4f12c728cf77c9c1d7c23707e4d8fb4632b46275f8f3397de33e5877af18", size = 2831929, upload-time = "2025-04-09T02:57:58.45Z" }, - { url = "https://files.pythonhosted.org/packages/0b/f3/0fe4c1b1f2569e8a18ad90c159298d862f96c3964392a20d74fc628aee44/numba-0.61.2-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:3a10a8fc9afac40b1eac55717cece1b8b1ac0b946f5065c89e00bde646b5b154", size = 2771785, upload-time = "2025-04-09T02:57:59.96Z" }, - { url = "https://files.pythonhosted.org/packages/e9/71/91b277d712e46bd5059f8a5866862ed1116091a7cb03bd2704ba8ebe015f/numba-0.61.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7d3bcada3c9afba3bed413fba45845f2fb9cd0d2b27dd58a1be90257e293d140", size = 2773289, upload-time = "2025-04-09T02:58:01.435Z" }, - { url = "https://files.pythonhosted.org/packages/0d/e0/5ea04e7ad2c39288c0f0f9e8d47638ad70f28e275d092733b5817cf243c9/numba-0.61.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:bdbca73ad81fa196bd53dc12e3aaf1564ae036e0c125f237c7644fe64a4928ab", size = 3893918, upload-time = "2025-04-09T02:58:02.933Z" }, - { url = "https://files.pythonhosted.org/packages/17/58/064f4dcb7d7e9412f16ecf80ed753f92297e39f399c905389688cf950b81/numba-0.61.2-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:5f154aaea625fb32cfbe3b80c5456d514d416fcdf79733dd69c0df3a11348e9e", size = 3584056, upload-time = "2025-04-09T02:58:04.538Z" }, - { url = "https://files.pythonhosted.org/packages/af/a4/6d3a0f2d3989e62a18749e1e9913d5fa4910bbb3e3311a035baea6caf26d/numba-0.61.2-cp313-cp313-win_amd64.whl", hash = "sha256:59321215e2e0ac5fa928a8020ab00b8e57cda8a97384963ac0dfa4d4e6aa54e7", size = 2831846, upload-time = "2025-04-09T02:58:06.125Z" }, + { url = "https://files.pythonhosted.org/packages/70/90/5f8614c165d2e256fbc6c57028519db6f32e4982475a372bbe550ea0454c/numba-0.63.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b33db00f18ccc790ee9911ce03fcdfe9d5124637d1ecc266f5ae0df06e02fec3", size = 2680501, upload-time = "2025-12-10T02:57:09.797Z" }, + { url = "https://files.pythonhosted.org/packages/dc/9d/d0afc4cf915edd8eadd9b2ab5b696242886ee4f97720d9322650d66a88c6/numba-0.63.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7d31ea186a78a7c0f6b1b2a3fe68057fdb291b045c52d86232b5383b6cf4fc25", size = 3744945, upload-time = "2025-12-10T02:57:11.697Z" }, + { url = "https://files.pythonhosted.org/packages/05/a9/d82f38f2ab73f3be6f838a826b545b80339762ee8969c16a8bf1d39395a8/numba-0.63.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ed3bb2fbdb651d6aac394388130a7001aab6f4541837123a4b4ab8b02716530c", size = 3450827, upload-time = "2025-12-10T02:57:13.709Z" }, + { url = "https://files.pythonhosted.org/packages/18/3f/a9b106e93c5bd7434e65f044bae0d204e20aa7f7f85d72ceb872c7c04216/numba-0.63.1-cp311-cp311-win_amd64.whl", hash = "sha256:1ecbff7688f044b1601be70113e2fb1835367ee0b28ffa8f3adf3a05418c5c87", size = 2747262, upload-time = "2025-12-10T02:57:15.664Z" }, + { url = "https://files.pythonhosted.org/packages/14/9c/c0974cd3d00ff70d30e8ff90522ba5fbb2bcee168a867d2321d8d0457676/numba-0.63.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2819cd52afa5d8d04e057bdfd54367575105f8829350d8fb5e4066fb7591cc71", size = 2680981, upload-time = "2025-12-10T02:57:17.579Z" }, + { url = 
"https://files.pythonhosted.org/packages/cb/70/ea2bc45205f206b7a24ee68a159f5097c9ca7e6466806e7c213587e0c2b1/numba-0.63.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5cfd45dbd3d409e713b1ccfdc2ee72ca82006860254429f4ef01867fdba5845f", size = 3801656, upload-time = "2025-12-10T02:57:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/0d/82/4f4ba4fd0f99825cbf3cdefd682ca3678be1702b63362011de6e5f71f831/numba-0.63.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69a599df6976c03b7ecf15d05302696f79f7e6d10d620367407517943355bcb0", size = 3501857, upload-time = "2025-12-10T02:57:20.721Z" }, + { url = "https://files.pythonhosted.org/packages/af/fd/6540456efa90b5f6604a86ff50dabefb187e43557e9081adcad3be44f048/numba-0.63.1-cp312-cp312-win_amd64.whl", hash = "sha256:bbad8c63e4fc7eb3cdb2c2da52178e180419f7969f9a685f283b313a70b92af3", size = 2750282, upload-time = "2025-12-10T02:57:22.474Z" }, + { url = "https://files.pythonhosted.org/packages/57/f7/e19e6eff445bec52dde5bed1ebb162925a8e6f988164f1ae4b3475a73680/numba-0.63.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:0bd4fd820ef7442dcc07da184c3f54bb41d2bdb7b35bacf3448e73d081f730dc", size = 2680954, upload-time = "2025-12-10T02:57:24.145Z" }, + { url = "https://files.pythonhosted.org/packages/e9/6c/1e222edba1e20e6b113912caa9b1665b5809433cbcb042dfd133c6f1fd38/numba-0.63.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:53de693abe4be3bd4dee38e1c55f01c55ff644a6a3696a3670589e6e4c39cde2", size = 3809736, upload-time = "2025-12-10T02:57:25.836Z" }, + { url = "https://files.pythonhosted.org/packages/76/0a/590bad11a8b3feeac30a24d01198d46bdb76ad15c70d3a530691ce3cae58/numba-0.63.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:81227821a72a763c3d4ac290abbb4371d855b59fdf85d5af22a47c0e86bf8c7e", size = 3508854, upload-time = "2025-12-10T02:57:27.438Z" }, + { url = "https://files.pythonhosted.org/packages/4e/f5/3800384a24eed1e4d524669cdbc0b9b8a628800bb1e90d7bd676e5f22581/numba-0.63.1-cp313-cp313-win_amd64.whl", hash = "sha256:eb227b07c2ac37b09432a9bda5142047a2d1055646e089d4a240a2643e508102", size = 2750228, upload-time = "2025-12-10T02:57:30.36Z" }, + { url = "https://files.pythonhosted.org/packages/36/2f/53be2aa8a55ee2608ebe1231789cbb217f6ece7f5e1c685d2f0752e95a5b/numba-0.63.1-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:f180883e5508940cc83de8a8bea37fc6dd20fbe4e5558d4659b8b9bef5ff4731", size = 2681153, upload-time = "2025-12-10T02:57:32.016Z" }, + { url = "https://files.pythonhosted.org/packages/13/91/53e59c86759a0648282368d42ba732c29524a745fd555ed1fb1df83febbe/numba-0.63.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0938764afa82a47c0e895637a6c55547a42c9e1d35cac42285b1fa60a8b02bb", size = 3778718, upload-time = "2025-12-10T02:57:33.764Z" }, + { url = "https://files.pythonhosted.org/packages/6c/0c/2be19eba50b0b7636f6d1f69dfb2825530537708a234ba1ff34afc640138/numba-0.63.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f90a929fa5094e062d4e0368ede1f4497d5e40f800e80aa5222c4734236a2894", size = 3478712, upload-time = "2025-12-10T02:57:35.518Z" }, + { url = "https://files.pythonhosted.org/packages/0d/5f/4d0c9e756732577a52211f31da13a3d943d185f7fb90723f56d79c696caa/numba-0.63.1-cp314-cp314-win_amd64.whl", hash = "sha256:8d6d5ce85f572ed4e1a135dbb8c0114538f9dd0e3657eeb0bb64ab204cbe2a8f", size = 2752161, upload-time = "2025-12-10T02:57:37.12Z" }, ] [[package]] @@ -2522,6 
+2528,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/83/11/00d3c3dfc25ad54e731d91449895a79e4bf2384dc3ac01809010ba88f6d5/seaborn-0.13.2-py3-none-any.whl", hash = "sha256:636f8336facf092165e27924f223d3c62ca560b1f2bb5dff7ab7fad265361987", size = 294914, upload-time = "2024-01-25T13:21:49.598Z" }, ] +[[package]] +name = "shellingham" +version = "1.5.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/58/15/8b3609fd3830ef7b27b655beb4b4e9c62313a4e8da8c676e142cc210d58e/shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de", size = 10310, upload-time = "2023-10-24T04:13:40.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/f9/0595336914c5619e5f28a1fb793285925a8cd4b432c9da0a987836c7f822/shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686", size = 9755, upload-time = "2023-10-24T04:13:38.866Z" }, +] + [[package]] name = "six" version = "1.17.0" @@ -2834,6 +2849,21 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, ] +[[package]] +name = "typer" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "rich" }, + { name = "shellingham" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/21/ca/950278884e2ca20547ff3eb109478c6baf6b8cf219318e6bc4f666fad8e8/typer-0.19.2.tar.gz", hash = "sha256:9ad824308ded0ad06cc716434705f691d4ee0bfd0fb081839d2e426860e7fdca", size = 104755, upload-time = "2025-09-23T09:47:48.256Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/22/35617eee79080a5d071d0f14ad698d325ee6b3bf824fc0467c03b30e7fa8/typer-0.19.2-py3-none-any.whl", hash = "sha256:755e7e19670ffad8283db353267cb81ef252f595aa6834a0d1ca9312d9326cb9", size = 46748, upload-time = "2025-09-23T09:47:46.777Z" }, +] + [[package]] name = "typing-extensions" version = "4.15.0"
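
The lock changes above reduce to three manifest-level updates: `typer` (floor `>=0.9.0`, resolved to 0.19.2 and pulling in `click`, `rich`, `shellingham`, and `typing-extensions`) is added to the dev and docs groups, the `psycopg2-binary` pin is relaxed from `==2.9.10` to `>=2.9.10`, and the `numba` floor rises to `>=0.62` (resolved to 0.63.1 against llvmlite 0.46.0). The sketch below shows one typical way to re-resolve such changes with uv after editing the specifiers in `pyproject.toml`; the exact commands behind this diff are not recorded, and whether dev/docs/test are extras or dependency groups is an assumption.

```bash
# Hypothetical workflow; the specifiers may equally have been edited by hand
# in pyproject.toml before re-locking.

# Re-resolve uv.lock against the updated constraints
# (typer>=0.9.0, psycopg2-binary>=2.9.10, numba>=0.62):
uv lock

# Optionally force selected packages to their newest compatible releases:
uv lock --upgrade-package numba --upgrade-package llvmlite

# Reinstall the environment; --all-extras assumes dev/docs/test are extras:
uv sync --all-extras
```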