diff --git a/benchmarks/README.md b/benchmarks/README.md new file mode 100644 index 0000000..553bcde --- /dev/null +++ b/benchmarks/README.md @@ -0,0 +1,327 @@ +# Callosum RPC Benchmark Suite + +Comprehensive performance benchmark suite for the Callosum RPC library with detailed profiling capabilities. + +## Features + +- **Throughput Benchmarks**: Measure requests/second with varying payload sizes and client counts +- **Latency Benchmarks**: Analyze latency percentiles (p50, p95, p99) under different load levels +- **Feature Overhead**: Measure performance impact of compression and CURVE authentication +- **Detailed Profiling**: CPU profiling (cProfile) and memory tracking (tracemalloc) +- **Rich Reports**: Interactive HTML reports with Plotly charts + colorful console output + +## Installation + +Install the benchmark dependencies: + +```bash +# Using uv (recommended) +uv pip install -e ".[benchmark,zeromq]" + +# Or using pip +pip install -e ".[benchmark,zeromq]" +``` + +This installs: +- `rich` - Console output with tables and colors +- `plotly` - Interactive charts for HTML reports +- `pandas` - Data processing +- `numpy` - Statistical calculations +- `scipy` - Advanced statistics +- `jinja2` - HTML templating +- `pyzmq` - ZeroMQ transport (required for RPC) + +## Quick Start + +Run all benchmarks with default settings: + +```bash +python -m benchmarks +``` + +Run quick test (reduced iterations): + +```bash +python -m benchmarks --quick +``` + +Run specific scenario: + +```bash +python -m benchmarks --scenario throughput +python -m benchmarks --scenario latency +python -m benchmarks --scenario features +``` + +List available scenarios: + +```bash +python -m benchmarks --list +``` + +## Command-Line Options + +``` +Options: + --scenario [all|throughput|latency|features] + Which scenarios to run (default: all) + --output-dir PATH Output directory for results (default: benchmark-results) + --format [html|json|both] Output format (default: both) + --quick Run with 
reduced iterations for quick testing + --no-profiling Disable CPU and memory profiling + --list List available scenarios and exit + --help Show this message and exit +``` + +## Benchmark Scenarios + +### Throughput Benchmarks + +**Variable Payload Sizes** +- Tests: 64B, 256B, 1KB, 4KB, 16KB, 64KB, 256KB, 1MB +- Measures: requests/second, MB/second +- Compares: Both schedulers (ExitOrdered vs KeySerialized) + +**Variable Client Counts** +- Tests: 1, 2, 5, 10, 20, 50, 100 concurrent clients +- Fixed: 1KB payload +- Measures: Total throughput, scalability + +**Scheduler Comparison** +- Compares: ExitOrderedAsyncScheduler vs KeySerializedAsyncScheduler +- Measures: Performance overhead of ordering guarantees + +### Latency Benchmarks + +**Latency Under Load** +- Tests: Different load levels (100, 500, 1000, 2000, 5000 req/s) +- Measures: p50, p95, p99, p99.9 latencies +- Duration: 30 seconds per load level + +**Latency by Payload Size** +- Tests: Various payload sizes +- Measures: Latency distribution for each size +- Fixed: 10 concurrent clients + +**Tail Latency Analysis** +- Long-running: 10,000 requests +- Identifies: Jitter and outliers +- Detailed: Time-series latency tracking + +### Feature Overhead Benchmarks + +**Compression Overhead** +- Compares: Snappy compression on/off +- Payload sizes: 1KB, 10KB, 100KB, 1MB +- Measures: Throughput impact, CPU overhead + +**Authentication Overhead** +- Compares: CURVE encryption on/off +- Measures: Handshake latency, throughput impact +- Fixed: 1KB payload, 10 clients + +**Combined Features Matrix** +- Tests: All combinations of compression × authentication +- Measures: Combined performance impact + +## Profiling + +### CPU Profiling (cProfile) + +Identifies function-level CPU hotspots: +- Top 20 functions by cumulative time +- Call counts and timing +- Focuses on callosum.* modules + +### Memory Profiling (tracemalloc) + +Tracks memory allocations: +- Peak memory usage +- Memory increase during benchmark +- Scheduler 
queue sizes (leak detection) +- Top allocation locations + +Disable profiling for faster benchmarks: + +```bash +python -m benchmarks --no-profiling +``` + +## Output + +### Console Output + +Real-time colorized output with: +- Progress bars during execution +- Results table with key metrics +- Summary statistics +- Profiling results (if enabled) + +### HTML Report + +Interactive report with: +- Executive summary with key metrics +- Plotly charts (line charts, box plots, bar charts) +- Sortable tables +- Profiling results +- Downloadable raw data (JSON) + +### JSON Output + +Machine-readable format with: +- All metrics and configuration +- Raw request data +- Profiling results +- Suitable for CI/CD integration + +## Example Usage + +### Basic Benchmark Run + +```bash +# Run all benchmarks with HTML and JSON output +python -m benchmarks + +# Results will be in: benchmark-results/ +# - benchmark-report-YYYYMMDD-HHMMSS.html +# - benchmark-results-YYYYMMDD-HHMMSS.json +``` + +### Quick Performance Check + +```bash +# Fast iteration for development +python -m benchmarks --quick --scenario throughput +``` + +### Deep Profiling + +```bash +# Run with full profiling enabled (default) +python -m benchmarks --scenario latency --format html +``` + +### CI/CD Integration + +```bash +# JSON-only output for automated testing +python -m benchmarks --quick --format json --output-dir ci-results +``` + +## Configuration + +The default configuration can be found in `benchmarks/core/config.py`. 
Key parameters: + +```python +# Throughput config +payload_sizes = [64, 256, 1024, 4096, 16384, 65536, 262144, 1048576] +client_counts = [1, 2, 5, 10, 20, 50, 100] +requests_per_test = 10000 + +# Latency config +target_loads = [100, 500, 1000, 2000, 5000] # req/s +duration_seconds = 30 +payload_sizes_test = [64, 256, 1024, 4096, 16384, 65536] + +# Feature config +compression_payloads = [1024, 10240, 102400, 1048576] +requests_per_test = 5000 + +# Global settings +iterations = 3 # Number of runs per benchmark +warmup_iterations = 1 +``` + +## Architecture + +``` +benchmarks/ +├── core/ # Core infrastructure +│ ├── config.py # Configuration dataclasses +│ ├── metrics.py # Metric data structures +│ ├── profiler.py # CPU and memory profiling +│ └── runner.py # Main orchestration +├── scenarios/ # Benchmark scenarios +│ ├── base.py # Base scenario class +│ ├── throughput.py +│ ├── latency.py +│ └── features.py +├── fixtures/ # Server and client implementations +│ ├── server.py # Benchmark server +│ └── client.py # Benchmark client +├── reporters/ # Output formatting +│ ├── console.py # Rich console output +│ └── html.py # HTML report generation +├── utils/ # Utilities +│ ├── payload.py # Payload generators +│ ├── statistics.py # Statistical functions +│ └── auth_helpers.py # Auth setup +├── cli.py # Command-line interface +└── __main__.py # Module entry point +``` + +## Interpreting Results + +### Throughput + +- **Higher is better** for requests/second +- Look for: Scalability with client count +- Compare: Scheduler overhead (ExitOrdered vs KeySerialized) + +### Latency + +- **Lower is better** for all percentiles +- p50 (median): Typical latency +- p95: Most users' experience +- p99: Tail latency, important for SLAs +- p99.9: Extreme outliers + +### Memory Leaks + +If you see warnings about memory leaks: +- Check scheduler queue sizes (should be 0 after cleanup) +- Review memory increase (should be minimal) +- Profiling shows which code is allocating memory + +### 
Regression Detection + +Compare current results with baseline: +- >10% throughput decrease: Warning +- >25% throughput decrease: Critical +- >10% latency increase: Warning +- >25% latency increase: Critical + +## Troubleshooting + +### "Connection refused" errors + +Ensure no other process is using port 5020 or similar ports. + +### High memory usage + +This is expected during profiling. Disable with `--no-profiling` if needed. + +### Slow benchmark execution + +Use `--quick` for faster iterations, or run specific scenarios only. + +### Dependencies not found + +Make sure you installed with benchmark extras: +```bash +pip install -e ".[benchmark,zeromq]" +``` + +## Contributing + +To add a new benchmark scenario: + +1. Create a new scenario class in `scenarios/` +2. Inherit from `BaseBenchmarkScenario` +3. Implement the `run()` method +4. Add to the runner in `core/runner.py` +5. Update CLI help text + +## License + +MIT License - See main project LICENSE file diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/__main__.py b/benchmarks/__main__.py new file mode 100644 index 0000000..9a35e70 --- /dev/null +++ b/benchmarks/__main__.py @@ -0,0 +1,13 @@ +""" +Main entry point for running benchmarks as a module. + +Usage: + python -m benchmarks + python -m benchmarks --scenario throughput + python -m benchmarks --quick +""" + +from benchmarks.cli import main + +if __name__ == "__main__": + main() diff --git a/benchmarks/cli.py b/benchmarks/cli.py new file mode 100644 index 0000000..23e8f06 --- /dev/null +++ b/benchmarks/cli.py @@ -0,0 +1,133 @@ +""" +CLI interface for Callosum benchmark suite. 
+""" + +import asyncio +from pathlib import Path + +import click + +from benchmarks.core.config import BenchmarkConfig +from benchmarks.core.runner import BenchmarkRunner +from benchmarks.reporters.console import ConsoleReporter + + +@click.command() +@click.option( + "--scenario", + type=click.Choice( + ["all", "throughput", "latency", "features"], case_sensitive=False + ), + default="all", + help="Which scenarios to run", +) +@click.option( + "--output-dir", + type=click.Path(path_type=Path), + default=Path("benchmark-results"), + help="Output directory for results", +) +@click.option( + "--format", + type=click.Choice(["html", "json", "both"], case_sensitive=False), + default="both", + help="Output format", +) +@click.option( + "--quick", + is_flag=True, + help="Run with reduced iterations for quick testing", +) +@click.option( + "--no-profiling", + is_flag=True, + help="Disable CPU and memory profiling", +) +@click.option( + "--list", + "list_scenarios", + is_flag=True, + help="List available scenarios and exit", +) +def main( + scenario: str, + output_dir: Path, + format: str, + quick: bool, + no_profiling: bool, + list_scenarios: bool, +): + """ + Callosum RPC Performance Benchmark Suite + + Run comprehensive performance benchmarks for the Callosum RPC library + with detailed profiling and reporting. 
+ """ + console = ConsoleReporter() + + # List scenarios + if list_scenarios: + console.print_info("Available benchmark scenarios:") + console.console.print() + console.console.print(" [cyan]throughput[/cyan] - Throughput benchmarks") + console.console.print(" • Variable payload sizes") + console.console.print(" • Variable client counts") + console.console.print(" • Scheduler comparison") + console.console.print() + console.console.print( + " [cyan]latency[/cyan] - Latency percentile benchmarks" + ) + console.console.print(" • Latency under load") + console.console.print(" • Latency by payload size") + console.console.print(" • Tail latency analysis") + console.console.print() + console.console.print( + " [cyan]features[/cyan] - Feature overhead benchmarks" + ) + console.console.print(" • Compression overhead") + console.console.print(" • Authentication overhead") + console.console.print(" • Combined features matrix") + console.console.print() + return + + # Create configuration + if quick: + config = BenchmarkConfig.quick() + console.print_info("Using quick test configuration (reduced iterations)") + else: + config = BenchmarkConfig() + + # Disable profiling if requested + if no_profiling: + config.profiling.enabled = False + console.print_info("Profiling disabled") + + # Create and run benchmark runner + runner = BenchmarkRunner( + config=config, + console_reporter=console, + enable_profiling=config.profiling.enabled, + ) + + # Run benchmarks + scenario_filter = None if scenario == "all" else scenario + + try: + asyncio.run(runner.run_all(scenario_filter=scenario_filter)) + + # Save results + runner.save_results(output_dir=output_dir, format=format) + + console.console.print() + console.console.print("[green]✓[/green] Benchmarks completed successfully!") + console.console.print(f"Results saved to: {output_dir}") + + except KeyboardInterrupt: + console.print_warning("Benchmark interrupted by user") + except Exception as e: + console.print_error(f"Benchmark failed: 
{e}") + raise + + +if __name__ == "__main__": + main() diff --git a/benchmarks/core/__init__.py b/benchmarks/core/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/core/config.py b/benchmarks/core/config.py new file mode 100644 index 0000000..6c8dd2f --- /dev/null +++ b/benchmarks/core/config.py @@ -0,0 +1,106 @@ +""" +Benchmark configuration data structures. +""" + +from dataclasses import dataclass, field +from typing import List + + +@dataclass +class ThroughputConfig: + """Configuration for throughput benchmarks.""" + + payload_sizes: List[int] = field( + default_factory=lambda: [64, 256, 1024, 4096, 16384, 65536, 262144, 1048576] + ) + client_counts: List[int] = field( + default_factory=lambda: [1, 2, 5, 10, 20, 50, 100] + ) + requests_per_test: int = 10000 + warmup_requests: int = 1000 + + +@dataclass +class LatencyConfig: + """Configuration for latency benchmarks.""" + + target_loads: List[int] = field( + default_factory=lambda: [100, 500, 1000, 2000, 5000] + ) # requests/sec + duration_seconds: int = 30 + payload_size: int = 1024 + payload_sizes_test: List[int] = field( + default_factory=lambda: [64, 256, 1024, 4096, 16384, 65536] + ) + + +@dataclass +class FeatureConfig: + """Configuration for feature overhead benchmarks.""" + + compression_payloads: List[int] = field( + default_factory=lambda: [1024, 10240, 102400, 1048576] + ) + requests_per_test: int = 5000 + + +@dataclass +class ServerConfig: + """Configuration for benchmark server.""" + + bind_address: str = "tcp://127.0.0.1:*" + scheduler_types: List[str] = field( + default_factory=lambda: ["exit-ordered", "key-serialized"] + ) + + +@dataclass +class ProfilingConfig: + """Configuration for profiling.""" + + enabled: bool = True + profile_cpu: bool = True + profile_memory: bool = True + max_frames: int = 10 + + +@dataclass +class BenchmarkConfig: + """Complete benchmark configuration.""" + + throughput: ThroughputConfig = field(default_factory=ThroughputConfig) + latency: 
LatencyConfig = field(default_factory=LatencyConfig) + features: FeatureConfig = field(default_factory=FeatureConfig) + server: ServerConfig = field(default_factory=ServerConfig) + profiling: ProfilingConfig = field(default_factory=ProfilingConfig) + + # Global settings + iterations: int = 3 # Number of times to run each benchmark + warmup_iterations: int = 1 # Number of warmup iterations + + @classmethod + def quick(cls) -> "BenchmarkConfig": + """ + Create a quick test configuration with reduced parameters. + + Returns: + BenchmarkConfig optimized for quick testing + """ + config = cls() + config.iterations = 1 + config.warmup_iterations = 0 + + # Reduce test sizes + config.throughput.payload_sizes = [1024, 4096] + config.throughput.client_counts = [1, 5, 10] + config.throughput.requests_per_test = 1000 + config.throughput.warmup_requests = 100 + + config.latency.target_loads = [100, 500] + config.latency.duration_seconds = 10 + config.latency.payload_sizes_test = [1024, 4096] + + config.features.compression_payloads = [1024, 10240] + config.features.requests_per_test = 1000 + + return config diff --git a/benchmarks/core/metrics.py b/benchmarks/core/metrics.py new file mode 100644 index 0000000..4c78bbd --- /dev/null +++ b/benchmarks/core/metrics.py @@ -0,0 +1,156 @@ +""" +Core metrics data structures for benchmark results. 
+""" + +from dataclasses import dataclass, field +from datetime import datetime +from typing import Any, Dict, List, Optional, Tuple + + +@dataclass +class RequestMetric: + """Single request measurement.""" + + timestamp: float + latency_ms: float + payload_size_bytes: int + success: bool + error: Optional[str] = None + + +@dataclass +class ThroughputMetric: + """Throughput measurement.""" + + requests_per_second: float + bytes_per_second: float + duration_seconds: float + total_requests: int + failed_requests: int + + @property + def success_rate(self) -> float: + """Calculate success rate as percentage.""" + if self.total_requests == 0: + return 0.0 + return ( + (self.total_requests - self.failed_requests) + / self.total_requests + * 100.0 + ) + + +@dataclass +class LatencyMetric: + """Latency statistics.""" + + min_ms: float + max_ms: float + mean_ms: float + median_ms: float + p95_ms: float + p99_ms: float + p999_ms: float + stddev_ms: float + + +@dataclass +class MemoryMetric: + """Memory usage statistics.""" + + peak_memory_mb: float + memory_increase_mb: float + allocations_count: int + scheduler_queue_sizes: Dict[str, int] + + @property + def has_memory_leak(self) -> bool: + """Check if there are potential memory leaks based on scheduler queues.""" + return any(size > 0 for size in self.scheduler_queue_sizes.values()) + + +@dataclass +class ProfileMetric: + """CPU profiling statistics.""" + + top_functions: List[Tuple[str, float]] # (function_name, cumulative_time_sec) + total_time_seconds: float + + def get_top_n(self, n: int = 10) -> List[Tuple[str, float, float]]: + """ + Get top N functions by cumulative time. 
+ + Returns: + List of (function_name, cumulative_time_sec, percentage) + """ + result = [] + for func_name, cumtime in self.top_functions[:n]: + percentage = ( + (cumtime / self.total_time_seconds * 100.0) + if self.total_time_seconds > 0 + else 0.0 + ) + result.append((func_name, cumtime, percentage)) + return result + + +@dataclass +class BenchmarkResult: + """Complete benchmark result.""" + + scenario_name: str + config: Dict[str, Any] + throughput: ThroughputMetric + latency: LatencyMetric + memory: Optional[MemoryMetric] = None + profile: Optional[ProfileMetric] = None + raw_metrics: List[RequestMetric] = field(default_factory=list) + timestamp: str = field(default_factory=lambda: datetime.now().isoformat()) + metadata: Dict[str, Any] = field(default_factory=dict) + + def to_dict(self) -> Dict[str, Any]: + """Convert to dictionary for JSON serialization.""" + return { + "scenario_name": self.scenario_name, + "config": self.config, + "throughput": { + "requests_per_second": self.throughput.requests_per_second, + "bytes_per_second": self.throughput.bytes_per_second, + "duration_seconds": self.throughput.duration_seconds, + "total_requests": self.throughput.total_requests, + "failed_requests": self.throughput.failed_requests, + "success_rate": self.throughput.success_rate, + }, + "latency": { + "min_ms": self.latency.min_ms, + "max_ms": self.latency.max_ms, + "mean_ms": self.latency.mean_ms, + "median_ms": self.latency.median_ms, + "p95_ms": self.latency.p95_ms, + "p99_ms": self.latency.p99_ms, + "p999_ms": self.latency.p999_ms, + "stddev_ms": self.latency.stddev_ms, + }, + "memory": ( + { + "peak_memory_mb": self.memory.peak_memory_mb, + "memory_increase_mb": self.memory.memory_increase_mb, + "allocations_count": self.memory.allocations_count, + "scheduler_queue_sizes": self.memory.scheduler_queue_sizes, + "has_memory_leak": self.memory.has_memory_leak, + } + if self.memory + else None + ), + "profile": ( + { + "top_functions": self.profile.top_functions, + 
"total_time_seconds": self.profile.total_time_seconds, + "top_10": self.profile.get_top_n(10), + } + if self.profile + else None + ), + "timestamp": self.timestamp, + "metadata": self.metadata, + } diff --git a/benchmarks/core/profiler.py b/benchmarks/core/profiler.py new file mode 100644 index 0000000..0b6bb90 --- /dev/null +++ b/benchmarks/core/profiler.py @@ -0,0 +1,335 @@ +""" +CPU and memory profiling integration for benchmarks. +""" + +import cProfile +import pstats +import tracemalloc +from io import StringIO +from pathlib import Path +from typing import Any, Dict, List, Optional, Tuple + +from benchmarks.core.metrics import MemoryMetric, ProfileMetric + + +class CProfileProfiler: + """ + Function-level CPU profiling using cProfile. + """ + + def __init__(self, filter_callosum_only: bool = True): + """ + Initialize CPU profiler. + + Args: + filter_callosum_only: If True, focus on callosum.* modules + """ + self.filter_callosum_only = filter_callosum_only + self.profiler: Optional[cProfile.Profile] = None + self.stats: Optional[pstats.Stats] = None + + def start_profiling(self) -> None: + """Start CPU profiling.""" + self.profiler = cProfile.Profile() + self.profiler.enable() + + def stop_profiling(self) -> None: + """Stop CPU profiling and collect stats.""" + if self.profiler: + self.profiler.disable() + # Create stats from profiler + string_io = StringIO() + self.stats = pstats.Stats(self.profiler, stream=string_io) + self.stats.strip_dirs() + + def get_top_functions(self, n: int = 20) -> List[Tuple[str, float]]: + """ + Get top N functions by cumulative time. 
+
+        Args:
+            n: Number of top functions to return
+
+        Returns:
+            List of (function_name, cumulative_time_seconds)
+        """
+        if not self.stats:
+            return []
+
+        # NOTE: pstats.sort_stats() only orders print_stats() output; the
+        # underlying .stats dict stays unsorted, so sort entries ourselves by
+        # cumulative time (index 3 of the per-function stats tuple).
+        entries = sorted(
+            self.stats.stats.items(),  # type: ignore[attr-defined]
+            key=lambda item: item[1][3],
+            reverse=True,
+        )
+        result = []
+        for func, (cc, nc, tt, ct, callers) in entries:
+            filename, line, func_name = func
+            # Filter BEFORE truncating so up to n matching functions survive.
+            if self.filter_callosum_only and "callosum" not in filename:
+                continue
+            result.append((f"{Path(filename).name}:{line}({func_name})", ct))
+
+        return result[:n]
+
+    def get_total_time(self) -> float:
+        """
+        Get total profiled time.
+
+        Returns:
+            Total time in seconds
+        """
+        if not self.stats:
+            return 0.0
+
+        return self.stats.total_tt  # type: ignore[attr-defined]
+
+    def save_stats(self, filepath: Path) -> None:
+        """
+        Save profiling stats to file.
+
+        Args:
+            filepath: Path to save stats
+        """
+        if self.profiler:
+            self.profiler.dump_stats(str(filepath))
+
+    def get_profile_metric(self) -> ProfileMetric:
+        """
+        Get ProfileMetric from collected stats.
+
+        Returns:
+            ProfileMetric with top functions and total time
+        """
+        return ProfileMetric(
+            top_functions=self.get_top_functions(20),
+            total_time_seconds=self.get_total_time(),
+        )
+
+
+class MemoryProfiler:
+    """
+    Memory allocation tracking using tracemalloc.
+
+    Follows the pattern from examples/simple-server.py.
+    """
+
+    def __init__(self, max_frames: int = 10):
+        """
+        Initialize memory profiler.
+
+        Args:
+            max_frames: Maximum number of stack frames to capture
+        """
+        self.max_frames = max_frames
+        self.start_snapshot: Optional[tracemalloc.Snapshot] = None
+        self.end_snapshot: Optional[tracemalloc.Snapshot] = None
+        self.peak_memory: float = 0.0
+
+    def start_tracking(self) -> None:
+        """Start memory tracking."""
+        tracemalloc.start(self.max_frames)
+        # Take initial snapshot
+        self.start_snapshot = self._take_filtered_snapshot()
+        self.peak_memory = 0.0
+
+    def _take_filtered_snapshot(self) -> tracemalloc.Snapshot:
+        """
+        Take a filtered snapshot excluding system modules.
+
+        Returns:
+            Filtered snapshot
+        """
+        snapshot = tracemalloc.take_snapshot()
+        # Filter out system modules (following simple-server.py pattern)
+        return snapshot.filter_traces((
+            tracemalloc.Filter(False, ""),
+            tracemalloc.Filter(False, tracemalloc.__file__),
+        ))
+
+    def take_snapshot(self) -> tracemalloc.Snapshot:
+        """
+        Take a memory snapshot.
+
+        Returns:
+            Filtered snapshot
+        """
+        snapshot = self._take_filtered_snapshot()
+
+        # Refresh the running peak (in MB) from tracemalloc's counters.
+        current, peak = tracemalloc.get_traced_memory()
+        self.peak_memory = max(self.peak_memory, peak / (1024**2))
+        return snapshot
+
+    def stop_tracking(self) -> None:
+        """Record peak memory (stop() clears counters), then snapshot and stop."""
+        self.peak_memory = max(self.peak_memory, tracemalloc.get_traced_memory()[1] / (1024**2))
+        self.end_snapshot = self._take_filtered_snapshot()
+        tracemalloc.stop()
+
+    def get_memory_increase(self) -> float:
+        """
+        Calculate memory increase from start to end.
+
+        Returns:
+            Memory increase in MB
+        """
+        if not self.start_snapshot or not self.end_snapshot:
+            return 0.0
+
+        # Calculate total allocated memory
+        start_total = sum(
+            stat.size for stat in self.start_snapshot.statistics("lineno")
+        )
+        end_total = sum(stat.size for stat in self.end_snapshot.statistics("lineno"))
+
+        increase_bytes = end_total - start_total
+        return increase_bytes / (1024**2)  # Convert to MB
+
+    def get_top_allocations(self, n: int = 10) -> List[Tuple[str, int, int]]:
+        """
+        Get top N memory allocations.
+ + Args: + n: Number of top allocations to return + + Returns: + List of (location, size_kb, count) + """ + if not self.end_snapshot: + return [] + + top_stats = self.end_snapshot.statistics("lineno")[:n] + + result = [] + for stat in top_stats: + location = f"{stat.traceback}" + size_kb = stat.size / 1024 + count = stat.count + result.append((location, int(size_kb), count)) + + return result + + def get_memory_diff(self, n: int = 10) -> List[Tuple[str, int]]: + """ + Get top memory differences between start and end. + + Args: + n: Number of top differences to return + + Returns: + List of (location, size_diff_kb) + """ + if not self.start_snapshot or not self.end_snapshot: + return [] + + top_stats = self.end_snapshot.compare_to(self.start_snapshot, "lineno")[:n] + + result = [] + for stat in top_stats: + location = str(stat.traceback) + size_diff_kb = stat.size_diff / 1024 + result.append((location, int(size_diff_kb))) + + return result + + def get_allocation_count(self) -> int: + """ + Get total number of allocations. + + Returns: + Number of allocations + """ + if not self.end_snapshot: + return 0 + + return sum(stat.count for stat in self.end_snapshot.statistics("lineno")) + + def check_scheduler_queues( + self, scheduler_queue_sizes: Dict[str, int] + ) -> MemoryMetric: + """ + Create MemoryMetric with scheduler queue information. + + Args: + scheduler_queue_sizes: Dictionary of queue names to sizes + + Returns: + MemoryMetric with all collected information + """ + return MemoryMetric( + peak_memory_mb=self.peak_memory, + memory_increase_mb=self.get_memory_increase(), + allocations_count=self.get_allocation_count(), + scheduler_queue_sizes=scheduler_queue_sizes, + ) + + +class BenchmarkProfiler: + """ + Combined CPU and memory profiler with unified interface. + """ + + def __init__( + self, + profile_cpu: bool = True, + profile_memory: bool = True, + filter_callosum_only: bool = True, + ): + """ + Initialize benchmark profiler. 
+ + Args: + profile_cpu: Enable CPU profiling + profile_memory: Enable memory profiling + filter_callosum_only: Focus on callosum modules only + """ + self.profile_cpu = profile_cpu + self.profile_memory = profile_memory + + self.cpu_profiler = ( + CProfileProfiler(filter_callosum_only=filter_callosum_only) + if profile_cpu + else None + ) + self.memory_profiler = MemoryProfiler() if profile_memory else None + + async def __aenter__(self) -> "BenchmarkProfiler": + """Start profiling as async context manager.""" + if self.cpu_profiler: + self.cpu_profiler.start_profiling() + if self.memory_profiler: + self.memory_profiler.start_tracking() + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + """Stop profiling.""" + if self.cpu_profiler: + self.cpu_profiler.stop_profiling() + if self.memory_profiler: + self.memory_profiler.stop_tracking() + + def get_results( + self, scheduler_queue_sizes: Optional[Dict[str, int]] = None + ) -> Tuple[Optional[ProfileMetric], Optional[MemoryMetric]]: + """ + Get profiling results. + + Args: + scheduler_queue_sizes: Optional scheduler queue sizes for memory metric + + Returns: + Tuple of (ProfileMetric, MemoryMetric) + """ + profile_metric = None + memory_metric = None + + if self.cpu_profiler: + profile_metric = self.cpu_profiler.get_profile_metric() + + if self.memory_profiler: + queue_sizes = scheduler_queue_sizes or {} + memory_metric = self.memory_profiler.check_scheduler_queues(queue_sizes) + + return profile_metric, memory_metric diff --git a/benchmarks/core/runner.py b/benchmarks/core/runner.py new file mode 100644 index 0000000..607022d --- /dev/null +++ b/benchmarks/core/runner.py @@ -0,0 +1,291 @@ +""" +Main benchmark runner orchestration. 
+""" + +import time +from pathlib import Path +from typing import List, Optional + +from benchmarks.core.config import BenchmarkConfig +from benchmarks.core.metrics import BenchmarkResult +from benchmarks.core.profiler import BenchmarkProfiler +from benchmarks.fixtures.server import SchedulerType +from benchmarks.reporters.console import ConsoleReporter +from benchmarks.reporters.html import HTMLReporter +from benchmarks.scenarios.features import ( + AuthenticationOverhead, + CombinedFeaturesMatrix, + CompressionOverhead, +) +from benchmarks.scenarios.latency import ( + LatencyByPayloadSize, + LatencyUnderLoad, + TailLatencyAnalysis, +) +from benchmarks.scenarios.throughput import ( + ThroughputByClientCount, + ThroughputByPayloadSize, + ThroughputSchedulerComparison, +) + + +class BenchmarkRunner: + """ + Main orchestrator for running benchmark suites. + """ + + def __init__( + self, + config: BenchmarkConfig, + console_reporter: Optional[ConsoleReporter] = None, + enable_profiling: bool = True, + ): + """ + Initialize benchmark runner. 
+ + Args: + config: Benchmark configuration + console_reporter: Optional console reporter (creates default if None) + enable_profiling: Enable CPU and memory profiling + """ + self.config = config + self.console = console_reporter or ConsoleReporter() + self.enable_profiling = enable_profiling and config.profiling.enabled + self.all_results: List[BenchmarkResult] = [] + + def _create_profiler(self) -> Optional[BenchmarkProfiler]: + """Create profiler if enabled.""" + if not self.enable_profiling: + return None + + return BenchmarkProfiler( + profile_cpu=self.config.profiling.profile_cpu, + profile_memory=self.config.profiling.profile_memory, + ) + + async def run_throughput_benchmarks(self) -> List[BenchmarkResult]: + """Run all throughput benchmarks.""" + results = [] + + self.console.print_info("Running throughput benchmarks...") + + # Throughput by payload size + payload_scenario = ThroughputByPayloadSize( + config=self.config.throughput, + profiler=self._create_profiler(), + ) + + for payload_size in self.config.throughput.payload_sizes: + for scheduler_name in self.config.server.scheduler_types: + scheduler_type = SchedulerType(scheduler_name) + self.console.print_info( + f" Testing payload={payload_size}B, scheduler={scheduler_name}" + ) + result = await payload_scenario.run( + payload_size=payload_size, + scheduler_type=scheduler_type, + ) + results.append(result) + + # Throughput by client count + client_scenario = ThroughputByClientCount( + config=self.config.throughput, + profiler=self._create_profiler(), + ) + + for num_clients in self.config.throughput.client_counts: + for scheduler_name in self.config.server.scheduler_types: + scheduler_type = SchedulerType(scheduler_name) + self.console.print_info( + f" Testing clients={num_clients}, scheduler={scheduler_name}" + ) + result = await client_scenario.run( + num_clients=num_clients, + scheduler_type=scheduler_type, + ) + results.append(result) + + # Scheduler comparison + scheduler_scenario = 
ThroughputSchedulerComparison( + config=self.config.throughput, + profiler=self._create_profiler(), + ) + + for scheduler_name in self.config.server.scheduler_types: + scheduler_type = SchedulerType(scheduler_name) + self.console.print_info(f" Comparing scheduler={scheduler_name}") + result = await scheduler_scenario.run(scheduler_type=scheduler_type) + results.append(result) + + return results + + async def run_latency_benchmarks(self) -> List[BenchmarkResult]: + """Run all latency benchmarks.""" + results = [] + + self.console.print_info("Running latency benchmarks...") + + # Latency under load + load_scenario = LatencyUnderLoad( + config=self.config.latency, + profiler=self._create_profiler(), + ) + + for target_load in self.config.latency.target_loads: + self.console.print_info(f" Testing load={target_load} req/s") + result = await load_scenario.run(target_load=target_load) + results.append(result) + + # Latency by payload size + payload_scenario = LatencyByPayloadSize( + config=self.config.latency, + profiler=self._create_profiler(), + ) + + for payload_size in self.config.latency.payload_sizes_test: + self.console.print_info(f" Testing payload={payload_size}B") + result = await payload_scenario.run(payload_size=payload_size) + results.append(result) + + # Tail latency analysis + tail_scenario = TailLatencyAnalysis( + config=self.config.latency, + profiler=self._create_profiler(), + ) + + self.console.print_info(" Running tail latency analysis...") + result = await tail_scenario.run() + results.append(result) + + return results + + async def run_feature_benchmarks(self) -> List[BenchmarkResult]: + """Run all feature overhead benchmarks.""" + results = [] + + self.console.print_info("Running feature overhead benchmarks...") + + # Compression overhead + compression_scenario = CompressionOverhead( + config=self.config.features, + profiler=self._create_profiler(), + ) + + for payload_size in self.config.features.compression_payloads: + for compress in [False, 
True]: + self.console.print_info( + f" Testing compression={compress}, payload={payload_size}B" + ) + result = await compression_scenario.run( + payload_size=payload_size, + compress=compress, + ) + results.append(result) + + # Authentication overhead + auth_scenario = AuthenticationOverhead( + config=self.config.features, + profiler=self._create_profiler(), + ) + + for use_auth in [False, True]: + self.console.print_info(f" Testing authentication={use_auth}") + result = await auth_scenario.run(use_auth=use_auth) + results.append(result) + + # Combined features matrix + combined_scenario = CombinedFeaturesMatrix( + config=self.config.features, + profiler=self._create_profiler(), + ) + + for compress in [False, True]: + for use_auth in [False, True]: + self.console.print_info( + f" Testing compression={compress}, auth={use_auth}" + ) + result = await combined_scenario.run( + compress=compress, + use_auth=use_auth, + ) + results.append(result) + + return results + + async def run_all( + self, scenario_filter: Optional[str] = None + ) -> List[BenchmarkResult]: + """ + Run all benchmarks or filtered scenarios. 
+ + Args: + scenario_filter: Optional filter - 'throughput', 'latency', 'features', or None for all + + Returns: + List of all benchmark results + """ + self.console.show_header("Callosum RPC Benchmark Suite") + + start_time = time.time() + all_results = [] + + # Run selected scenarios + if scenario_filter is None or scenario_filter == "throughput": + results = await self.run_throughput_benchmarks() + all_results.extend(results) + + if scenario_filter is None or scenario_filter == "latency": + results = await self.run_latency_benchmarks() + all_results.extend(results) + + if scenario_filter is None or scenario_filter == "features": + results = await self.run_feature_benchmarks() + all_results.extend(results) + + total_duration = time.time() - start_time + + # Display results + self.console.show_results_table(all_results, title="All Benchmark Results") + self.console.show_summary( + total_scenarios=len(all_results), + total_duration=total_duration, + ) + + self.all_results = all_results + return all_results + + def save_results( + self, + output_dir: Path, + format: str = "both", # 'html', 'json', 'both' + ) -> None: + """ + Save benchmark results to files. 
+ + Args: + output_dir: Output directory + format: Output format - 'html', 'json', or 'both' + """ + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + + timestamp = time.strftime("%Y%m%d-%H%M%S") + + # Save JSON + if format in ["json", "both"]: + import json + + json_path = output_dir / f"benchmark-results-{timestamp}.json" + results_dict = [r.to_dict() for r in self.all_results] + json_path.write_text(json.dumps(results_dict, indent=2)) + self.console.print_info(f"Saved JSON results to {json_path}") + + # Save HTML + if format in ["html", "both"]: + html_path = output_dir / f"benchmark-report-{timestamp}.html" + html_reporter = HTMLReporter() + html_reporter.generate_report( + results=self.all_results, + output_path=html_path, + ) + self.console.print_info(f"Saved HTML report to {html_path}") diff --git a/benchmarks/fixtures/__init__.py b/benchmarks/fixtures/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/fixtures/client.py b/benchmarks/fixtures/client.py new file mode 100644 index 0000000..ca8a2c6 --- /dev/null +++ b/benchmarks/fixtures/client.py @@ -0,0 +1,267 @@ +""" +Benchmark client implementation. +""" + +import asyncio +import json +import time +from typing import Any, Callable, List, Optional, Tuple + +from benchmarks.core.metrics import RequestMetric +from callosum.auth import AbstractClientAuthenticator +from callosum.lower.zeromq import ZeroMQAddress, ZeroMQRPCTransport +from callosum.rpc.channel import Peer + + +class BenchmarkClient: + """ + Single client for benchmark workloads. + + Handles request execution with timing and error tracking. + """ + + def __init__( + self, + connect_address: ZeroMQAddress, + compress: bool = False, + authenticator: Optional[AbstractClientAuthenticator] = None, + ): + """ + Initialize benchmark client. 
+ + Args: + connect_address: ZeroMQ address to connect to + compress: Enable Snappy compression + authenticator: Client authenticator for CURVE encryption + """ + self.connect_address = connect_address + self.compress = compress + self.authenticator = authenticator + self.peer: Optional[Peer] = None + + async def __aenter__(self) -> "BenchmarkClient": + """Connect the client as an async context manager.""" + self.peer = Peer( + connect=self.connect_address, + transport=ZeroMQRPCTransport, + serializer=lambda o: json.dumps(o).encode("utf8"), + deserializer=lambda b: json.loads(b), + authenticator=self.authenticator, + ) + await self.peer.__aenter__() + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + """Disconnect the client.""" + if self.peer: + await self.peer.__aexit__(exc_type, exc_val, exc_tb) + + async def invoke( + self, + method: str, + body: Any, + timeout: Optional[float] = None, + ) -> Tuple[Any, float]: + """ + Invoke a single RPC method and measure latency. + + Args: + method: Method name to invoke + body: Request body + timeout: Optional timeout in seconds + + Returns: + Tuple of (response, latency_ms) + + Raises: + Exception: If RPC call fails + """ + if not self.peer: + raise RuntimeError("Client not connected") + + start_time = time.perf_counter() + try: + result = await self.peer.invoke(method, body, invoke_timeout=timeout) + end_time = time.perf_counter() + latency_ms = (end_time - start_time) * 1000.0 + return result, latency_ms + except Exception: + end_time = time.perf_counter() + latency_ms = (end_time - start_time) * 1000.0 + raise + + async def run_requests( + self, + count: int, + method: str, + payload_generator: Callable[[], Any], + rate_limit: Optional[float] = None, + timeout: Optional[float] = None, + ) -> List[RequestMetric]: + """ + Run multiple requests with timing. 
+ + Args: + count: Number of requests to make + method: Method name to invoke + payload_generator: Callable that generates request payload + rate_limit: Optional rate limit in requests/second + timeout: Optional timeout per request in seconds + + Returns: + List of RequestMetric for each request + """ + metrics = [] + delay_between_requests = (1.0 / rate_limit) if rate_limit else 0.0 + + for i in range(count): + payload = payload_generator() + payload_size = len( + json.dumps(payload).encode("utf8") + ) # Approximate size + + timestamp = time.time() + try: + _, latency_ms = await self.invoke(method, payload, timeout=timeout) + metrics.append( + RequestMetric( + timestamp=timestamp, + latency_ms=latency_ms, + payload_size_bytes=payload_size, + success=True, + ) + ) + except Exception as e: + # Record failed request + metrics.append( + RequestMetric( + timestamp=timestamp, + latency_ms=0.0, + payload_size_bytes=payload_size, + success=False, + error=str(e), + ) + ) + + # Rate limiting + if delay_between_requests > 0 and i < count - 1: + await asyncio.sleep(delay_between_requests) + + return metrics + + +class MultiClientRunner: + """ + Coordinates multiple clients for concurrent load testing. + """ + + def __init__( + self, + server_address: ZeroMQAddress, + num_clients: int, + compress: bool = False, + authenticators: Optional[List[Optional[AbstractClientAuthenticator]]] = None, + ): + """ + Initialize multi-client runner. 
+ + Args: + server_address: Server address to connect to + num_clients: Number of concurrent clients + compress: Enable compression + authenticators: Optional list of authenticators (one per client) + """ + self.server_address = server_address + self.num_clients = num_clients + self.compress = compress + self.authenticators: List[Optional[AbstractClientAuthenticator]] = ( + authenticators if authenticators is not None else [None] * num_clients + ) + + if len(self.authenticators) != num_clients: + raise ValueError("Number of authenticators must match number of clients") + + async def run_concurrent_clients( + self, + requests_per_client: int, + method: str, + payload_generator: Callable[[], Any], + rate_limit: Optional[float] = None, + timeout: Optional[float] = None, + ) -> List[RequestMetric]: + """ + Run requests concurrently across multiple clients. + + Args: + requests_per_client: Number of requests each client should make + method: Method name to invoke + payload_generator: Callable that generates payloads + rate_limit: Optional rate limit per client in requests/second + timeout: Optional timeout per request + + Returns: + Aggregated list of RequestMetric from all clients + """ + + async def client_worker(client_id: int) -> List[RequestMetric]: + """Worker function for a single client.""" + client = BenchmarkClient( + connect_address=self.server_address, + compress=self.compress, + authenticator=self.authenticators[client_id], + ) + async with client: + return await client.run_requests( + count=requests_per_client, + method=method, + payload_generator=payload_generator, + rate_limit=rate_limit, + timeout=timeout, + ) + + # Run all clients concurrently + tasks = [ + asyncio.create_task(client_worker(i)) for i in range(self.num_clients) + ] + + results = await asyncio.gather(*tasks, return_exceptions=False) + + # Aggregate metrics from all clients + all_metrics = [] + for metrics in results: + all_metrics.extend(metrics) + + return all_metrics + + +async def 
warmup_connection( + server_address: ZeroMQAddress, + num_requests: int = 10, + compress: bool = False, + authenticator: Optional[AbstractClientAuthenticator] = None, +) -> None: + """ + Perform warmup requests to establish connection and warm up caches. + + Args: + server_address: Server address to connect to + num_requests: Number of warmup requests + compress: Enable compression + authenticator: Optional authenticator + """ + client = BenchmarkClient( + connect_address=server_address, + compress=compress, + authenticator=authenticator, + ) + + async with client: + for _ in range(num_requests): + try: + await client.invoke("echo", {"warmup": True}, timeout=5.0) + except Exception: + # Ignore warmup errors + pass + + # Small delay to let connection fully establish + await asyncio.sleep(0.1) diff --git a/benchmarks/fixtures/server.py b/benchmarks/fixtures/server.py new file mode 100644 index 0000000..a492070 --- /dev/null +++ b/benchmarks/fixtures/server.py @@ -0,0 +1,243 @@ +""" +Benchmark server implementation. +""" + +import asyncio +import json +from enum import Enum +from typing import Any, Dict, Optional + +from callosum.auth import AbstractServerAuthenticator +from callosum.lower.zeromq import ZeroMQAddress, ZeroMQRPCTransport +from callosum.ordering import ( + AbstractAsyncScheduler, + ExitOrderedAsyncScheduler, + KeySerializedAsyncScheduler, +) +from callosum.rpc.channel import Peer +from callosum.rpc.message import RPCMessage + + +class SchedulerType(Enum): + """Scheduler type enumeration.""" + + EXIT_ORDERED = "exit-ordered" + KEY_SERIALIZED = "key-serialized" + + +class BenchmarkServer: + """ + Reusable benchmark server with configurable handlers. + + Follows the pattern from tests/test_rpc.py for consistency. 
+ """ + + def __init__( + self, + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + bind_address: Optional[ZeroMQAddress] = None, + compress: bool = False, + authenticator: Optional[AbstractServerAuthenticator] = None, + ): + """ + Initialize benchmark server. + + Args: + scheduler_type: Type of scheduler to use + bind_address: ZeroMQ bind address. If None, uses random port on localhost + compress: Enable Snappy compression + authenticator: Server authenticator for CURVE encryption + """ + self.scheduler_type = scheduler_type + self.bind_address = bind_address or ZeroMQAddress("tcp://127.0.0.1:*") + self.compress = compress + self.authenticator = authenticator + self.done_event = asyncio.Event() + self.peer: Optional[Peer] = None + self.actual_address: Optional[str] = None + self._server_task: Optional[asyncio.Task] = None + + def _create_scheduler(self) -> AbstractAsyncScheduler: + """Create scheduler instance based on type.""" + if self.scheduler_type == SchedulerType.KEY_SERIALIZED: + return KeySerializedAsyncScheduler() + else: + return ExitOrderedAsyncScheduler() + + async def _echo_handler(self, request: RPCMessage) -> Any: + """Simple echo handler for latency tests.""" + return request.body + + async def _compute_handler(self, request: RPCMessage) -> Any: + """CPU-intensive handler for stress tests.""" + # Extract computation parameter from request + body = request.body + if isinstance(body, dict): + iterations = body.get("iterations", 1000) + else: + iterations = 1000 + + # Perform CPU-intensive work + result = 0 + for i in range(iterations): + result += i**2 + + return {"result": result, "iterations": iterations} + + async def _memory_handler(self, request: RPCMessage) -> Any: + """Memory allocation/deallocation handler.""" + body = request.body + if isinstance(body, dict): + size_kb = body.get("size_kb", 100) + else: + size_kb = 100 + + # Allocate memory + data = bytearray(size_kb * 1024) + # Fill with pattern + for i in range(len(data)): + 
data[i] = i % 256 + + # Return size to confirm allocation + return {"allocated_kb": size_kb} + + async def _variable_delay_handler(self, request: RPCMessage) -> Any: + """Handler with variable processing delay.""" + body = request.body + if isinstance(body, dict): + delay_ms = body.get("delay_ms", 0) + else: + delay_ms = 0 + + if delay_ms > 0: + await asyncio.sleep(delay_ms / 1000.0) + + return {"delayed_ms": delay_ms} + + async def _run_server(self) -> None: + """Run the server (internal coroutine).""" + scheduler = self._create_scheduler() + + self.peer = Peer( + bind=self.bind_address, + transport=ZeroMQRPCTransport, + scheduler=scheduler, + serializer=lambda o: json.dumps(o).encode("utf8"), + deserializer=lambda b: json.loads(b), + authenticator=self.authenticator, + ) + + # Register all handlers + self.peer.handle_function("echo", self._echo_handler) + self.peer.handle_function("compute", self._compute_handler) + self.peer.handle_function("memory", self._memory_handler) + self.peer.handle_function("variable_delay", self._variable_delay_handler) + + async with self.peer: + # Get actual bound address (useful when using random port) + import zmq + + underlying_sock = self.peer._transport._sock # type: ignore[attr-defined] + self.actual_address = underlying_sock.getsockopt( + zmq.LAST_ENDPOINT + ).decode("utf-8") + + await self.done_event.wait() + + async def __aenter__(self) -> "BenchmarkServer": + """Start the server as an async context manager.""" + # Start server in background task + self._server_task = asyncio.create_task(self._run_server()) + + # Wait a bit for server to be ready + await asyncio.sleep(0.1) + + return self + + async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: + """Stop the server and verify cleanup.""" + # Signal server to stop + self.done_event.set() + + # Wait for server task to complete + if self._server_task: + await self._server_task + + # Verify no memory leaks (following test pattern) + if self.peer and 
self.peer._scheduler: + scheduler = self.peer._scheduler + if isinstance(scheduler, KeySerializedAsyncScheduler): + # Check memory leak + assert len(scheduler._pending) == 0, ( + "KeySerializedAsyncScheduler has pending tasks" + ) + assert len(scheduler._tasks) == 0, ( + "KeySerializedAsyncScheduler has remaining tasks" + ) + assert len(scheduler._futures) == 0, ( + "KeySerializedAsyncScheduler has unfulfilled futures" + ) + elif isinstance(scheduler, ExitOrderedAsyncScheduler): + assert len(scheduler._tasks) == 0, ( + "ExitOrderedAsyncScheduler has remaining tasks" + ) + + def get_connect_address(self) -> ZeroMQAddress: + """ + Get the address clients should use to connect. + + Returns: + ZeroMQAddress suitable for client connection + """ + if self.actual_address: + # Convert tcp://0.0.0.0:12345 to tcp://localhost:12345 + addr = self.actual_address.replace("0.0.0.0", "localhost").replace( + "127.0.0.1", "localhost" + ) + return ZeroMQAddress(addr) + else: + return self.bind_address + + def verify_no_memory_leaks(self) -> bool: + """ + Verify that the scheduler has no pending tasks (no memory leaks). + + Returns: + True if no memory leaks detected + """ + if not self.peer or not self.peer._scheduler: + return True + + scheduler = self.peer._scheduler + if isinstance(scheduler, KeySerializedAsyncScheduler): + return ( + len(scheduler._pending) == 0 + and len(scheduler._tasks) == 0 + and len(scheduler._futures) == 0 + ) + elif isinstance(scheduler, ExitOrderedAsyncScheduler): + return len(scheduler._tasks) == 0 + + return True + + def get_scheduler_queue_sizes(self) -> Dict[str, int]: + """ + Get current scheduler queue sizes for memory tracking. 
+ + Returns: + Dictionary with queue names and sizes + """ + if not self.peer or not self.peer._scheduler: + return {} + + scheduler = self.peer._scheduler + if isinstance(scheduler, KeySerializedAsyncScheduler): + return { + "pending": len(scheduler._pending), + "tasks": len(scheduler._tasks), + "futures": len(scheduler._futures), + } + elif isinstance(scheduler, ExitOrderedAsyncScheduler): + return {"tasks": len(scheduler._tasks)} + + return {} diff --git a/benchmarks/reporters/__init__.py b/benchmarks/reporters/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/reporters/console.py b/benchmarks/reporters/console.py new file mode 100644 index 0000000..e89a945 --- /dev/null +++ b/benchmarks/reporters/console.py @@ -0,0 +1,359 @@ +""" +Console reporter using Rich for formatted output. +""" + +from typing import List, Optional + +from rich.console import Console +from rich.panel import Panel +from rich.progress import ( + BarColumn, + Progress, + SpinnerColumn, + TextColumn, + TimeElapsedColumn, +) +from rich.table import Table +from rich.text import Text + +from benchmarks.core.metrics import BenchmarkResult + + +class ConsoleReporter: + """ + Rich console output for benchmark results. + """ + + def __init__(self): + """Initialize console reporter.""" + self.console = Console() + + def show_header(self, title: str) -> None: + """ + Display benchmark header. + + Args: + title: Benchmark suite title + """ + self.console.print() + self.console.rule(f"[bold blue]{title}", style="blue") + self.console.print() + + def create_progress( + self, description: str = "Running benchmarks..." + ) -> Progress: + """ + Create a progress bar for benchmark execution. 
+ + Args: + description: Progress description + + Returns: + Progress instance + """ + return Progress( + SpinnerColumn(), + TextColumn("[progress.description]{task.description}"), + BarColumn(), + TextColumn("[progress.percentage]{task.percentage:>3.0f}%"), + TimeElapsedColumn(), + console=self.console, + ) + + def show_results_table( + self, + results: List[BenchmarkResult], + title: str = "Benchmark Results", + ) -> None: + """ + Display results in a formatted table. + + Args: + results: List of benchmark results + title: Table title + """ + if not results: + self.console.print("[yellow]No results to display[/yellow]") + return + + table = Table(title=title, show_header=True, header_style="bold magenta") + + # Add columns + table.add_column("Scenario", style="cyan", no_wrap=True) + table.add_column("Config", style="dim") + table.add_column("Throughput\n(req/s)", justify="right", style="green") + table.add_column("P50\n(ms)", justify="right") + table.add_column("P95\n(ms)", justify="right") + table.add_column("P99\n(ms)", justify="right") + table.add_column("Success\n(%)", justify="right") + table.add_column("Status", justify="center") + + # Add rows + for result in results: + # Format config string + config_parts = [] + if "payload_size" in result.config: + size_kb = result.config["payload_size"] / 1024 + config_parts.append(f"{size_kb:.0f}KB") + if "num_clients" in result.config: + config_parts.append(f"{result.config['num_clients']}c") + if "scheduler" in result.config: + sched = result.config["scheduler"] + if sched == "exit-ordered": + config_parts.append("exit") + elif sched == "key-serialized": + config_parts.append("key-ser") + if "compress" in result.config: + if result.config["compress"]: + config_parts.append("comp") + if "use_auth" in result.config: + if result.config["use_auth"]: + config_parts.append("auth") + + config_str = ", ".join(config_parts) if config_parts else "-" + + # Status indicator + success_rate = result.throughput.success_rate + if 
success_rate == 100.0: + status = "[green]✓[/green]" + elif success_rate >= 95.0: + status = "[yellow]⚠[/yellow]" + else: + status = "[red]✗[/red]" + + # Add memory leak warning + if result.memory and result.memory.has_memory_leak: + status = "[red]⚠ LEAK[/red]" + + table.add_row( + result.scenario_name, + config_str, + f"{result.throughput.requests_per_second:,.0f}", + f"{result.latency.median_ms:.2f}", + f"{result.latency.p95_ms:.2f}", + f"{result.latency.p99_ms:.2f}", + f"{success_rate:.1f}", + status, + ) + + self.console.print(table) + self.console.print() + + def show_summary( + self, + total_scenarios: int, + total_duration: float, + failed_scenarios: int = 0, + ) -> None: + """ + Display benchmark summary. + + Args: + total_scenarios: Total number of scenarios run + total_duration: Total duration in seconds + failed_scenarios: Number of failed scenarios + """ + summary_text = Text() + summary_text.append("Completed: ", style="bold") + summary_text.append(f"{total_scenarios} scenarios ", style="cyan") + summary_text.append(f"in {total_duration:.1f}s\n", style="dim") + + if failed_scenarios > 0: + summary_text.append( + f"Failed: {failed_scenarios} scenarios\n", style="red bold" + ) + + panel = Panel(summary_text, title="Summary", border_style="green") + self.console.print(panel) + + def show_comparison( + self, + current: BenchmarkResult, + baseline: Optional[BenchmarkResult], + ) -> None: + """ + Show side-by-side comparison with baseline. 
+ + Args: + current: Current benchmark result + baseline: Baseline benchmark result + """ + if baseline is None: + self.print_warning("No baseline provided for comparison") + return + + table = Table(title=f"Comparison: {current.scenario_name}", show_header=True) + + table.add_column("Metric", style="cyan") + table.add_column("Baseline", justify="right") + table.add_column("Current", justify="right") + table.add_column("Change", justify="right") + + # Helper to calculate and format change + def format_change( + current_val: float, baseline_val: float, lower_is_better: bool = False + ) -> str: + if baseline_val == 0: + return "-" + + change_pct = ((current_val - baseline_val) / baseline_val) * 100.0 + + if lower_is_better: + # For latency, lower is better + if change_pct <= -10: + color = "green" + symbol = "↓" + elif change_pct >= 10: + color = "red" + symbol = "↑" + else: + color = "yellow" + symbol = "~" + else: + # For throughput, higher is better + if change_pct >= 10: + color = "green" + symbol = "↑" + elif change_pct <= -10: + color = "red" + symbol = "↓" + else: + color = "yellow" + symbol = "~" + + return f"[{color}]{symbol} {abs(change_pct):.1f}%[/{color}]" + + # Throughput + table.add_row( + "Throughput (req/s)", + f"{baseline.throughput.requests_per_second:,.0f}", + f"{current.throughput.requests_per_second:,.0f}", + format_change( + current.throughput.requests_per_second, + baseline.throughput.requests_per_second, + lower_is_better=False, + ), + ) + + # Latency metrics + table.add_row( + "P50 Latency (ms)", + f"{baseline.latency.median_ms:.2f}", + f"{current.latency.median_ms:.2f}", + format_change( + current.latency.median_ms, + baseline.latency.median_ms, + lower_is_better=True, + ), + ) + + table.add_row( + "P95 Latency (ms)", + f"{baseline.latency.p95_ms:.2f}", + f"{current.latency.p95_ms:.2f}", + format_change( + current.latency.p95_ms, + baseline.latency.p95_ms, + lower_is_better=True, + ), + ) + + table.add_row( + "P99 Latency (ms)", + 
f"{baseline.latency.p99_ms:.2f}", + f"{current.latency.p99_ms:.2f}", + format_change( + current.latency.p99_ms, + baseline.latency.p99_ms, + lower_is_better=True, + ), + ) + + self.console.print(table) + self.console.print() + + def show_profiling_results(self, result: BenchmarkResult) -> None: + """ + Display profiling results if available. + + Args: + result: Benchmark result with profiling data + """ + if not result.profile and not result.memory: + return + + self.console.print(f"[bold]Profiling Results: {result.scenario_name}[/bold]") + self.console.print() + + # CPU Profiling + if result.profile: + table = Table(title="Top CPU Hotspots", show_header=True) + table.add_column("Function", style="cyan") + table.add_column("Time (s)", justify="right", style="green") + table.add_column("%", justify="right") + + for func_name, cumtime, pct in result.profile.get_top_n(10): + table.add_row( + func_name[:60], # Truncate long names + f"{cumtime:.3f}", + f"{pct:.1f}%", + ) + + self.console.print(table) + self.console.print() + + # Memory Profiling + if result.memory: + table = Table(title="Memory Usage", show_header=True) + table.add_column("Metric", style="cyan") + table.add_column("Value", justify="right") + + table.add_row("Peak Memory", f"{result.memory.peak_memory_mb:.2f} MB") + table.add_row( + "Memory Increase", f"{result.memory.memory_increase_mb:.2f} MB" + ) + table.add_row("Allocations", f"{result.memory.allocations_count:,}") + + if result.memory.scheduler_queue_sizes: + for queue_name, size in result.memory.scheduler_queue_sizes.items(): + style = "red" if size > 0 else "green" + table.add_row( + f"Scheduler {queue_name}", + f"[{style}]{size}[/{style}]", + ) + + self.console.print(table) + self.console.print() + + if result.memory.has_memory_leak: + self.console.print( + "[red bold]⚠ Warning: Potential memory leak detected![/red bold]" + ) + self.console.print() + + def print_error(self, message: str) -> None: + """ + Print error message. 
+ + Args: + message: Error message + """ + self.console.print(f"[red bold]Error:[/red bold] {message}") + + def print_warning(self, message: str) -> None: + """ + Print warning message. + + Args: + message: Warning message + """ + self.console.print(f"[yellow bold]Warning:[/yellow bold] {message}") + + def print_info(self, message: str) -> None: + """ + Print info message. + + Args: + message: Info message + """ + self.console.print(f"[blue]ℹ[/blue] {message}") diff --git a/benchmarks/reporters/html.py b/benchmarks/reporters/html.py new file mode 100644 index 0000000..10242c2 --- /dev/null +++ b/benchmarks/reporters/html.py @@ -0,0 +1,509 @@ +""" +HTML report generator with Plotly charts. +""" + +import json +from datetime import datetime +from pathlib import Path +from typing import List, Optional + +import plotly.graph_objects as go + +from benchmarks.core.metrics import BenchmarkResult + + +class HTMLReporter: + """ + Generate comprehensive HTML reports with interactive charts. + """ + + def __init__(self): + """Initialize HTML reporter.""" + pass + + def generate_report( + self, + results: List[BenchmarkResult], + output_path: Path, + baseline: Optional[List[BenchmarkResult]] = None, + title: str = "Callosum RPC Benchmark Results", + ) -> None: + """ + Generate complete HTML report. 
+ + Args: + results: List of benchmark results + output_path: Path to save HTML file + baseline: Optional baseline results for comparison + title: Report title + """ + # Create HTML content + html_parts = [] + + # Header + html_parts.append(self._create_header(title)) + + # Executive Summary + html_parts.append(self._create_summary_section(results)) + + # Group results by scenario type + throughput_results = [r for r in results if "throughput" in r.scenario_name] + latency_results = [r for r in results if "latency" in r.scenario_name] + feature_results = [ + r + for r in results + if any( + x in r.scenario_name + for x in ["compression", "authentication", "combined"] + ) + ] + + # Throughput Section + if throughput_results: + html_parts.append(self._create_section_header("Throughput Benchmarks")) + html_parts.append(self._create_throughput_charts(throughput_results)) + + # Latency Section + if latency_results: + html_parts.append(self._create_section_header("Latency Benchmarks")) + html_parts.append(self._create_latency_charts(latency_results)) + + # Feature Overhead Section + if feature_results: + html_parts.append(self._create_section_header("Feature Overhead")) + html_parts.append(self._create_feature_charts(feature_results)) + + # Profiling Section + profiled_results = [r for r in results if r.profile or r.memory] + if profiled_results: + html_parts.append(self._create_section_header("Profiling Results")) + html_parts.append(self._create_profiling_section(profiled_results)) + + # Raw Data Section + html_parts.append(self._create_section_header("Raw Data")) + html_parts.append(self._create_raw_data_section(results)) + + # Footer + html_parts.append(self._create_footer()) + + # Write HTML file + html_content = "\n".join(html_parts) + output_path.parent.mkdir(parents=True, exist_ok=True) + output_path.write_text(html_content) + + def _create_header(self, title: str) -> str: + """Create HTML header.""" + return f""" + + + + + + {title} + + + + +

{title}

+

Generated: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

+""" + + def _create_summary_section(self, results: List[BenchmarkResult]) -> str: + """Create executive summary section.""" + if not results: + return "" + + total_scenarios = len(results) + avg_throughput = sum( + r.throughput.requests_per_second for r in results + ) / len(results) + avg_p99 = sum(r.latency.p99_ms for r in results) / len(results) + total_requests = sum(r.throughput.total_requests for r in results) + + return f""" +
+

Executive Summary

+
+
+
{total_scenarios}
+
Scenarios Tested
+
+
+
{avg_throughput:,.0f}
+
Avg Throughput (req/s)
+
+
+
{avg_p99:.2f}
+
Avg P99 Latency (ms)
+
+
+
{total_requests:,}
+
Total Requests
+
+
+
+""" + + def _create_section_header(self, title: str) -> str: + """Create section header.""" + return f"

{title}

" + + def _create_throughput_charts(self, results: List[BenchmarkResult]) -> str: + """Create throughput visualization charts.""" + # Group by scenario type + by_payload = [r for r in results if "payload-size" in r.scenario_name] + by_client = [r for r in results if "client-count" in r.scenario_name] + by_scheduler = [ + r for r in results if "scheduler-comparison" in r.scenario_name + ] + + html_parts = [] + + # Throughput by payload size + if by_payload: + fig = go.Figure() + + # Group by scheduler + for scheduler in ["exit-ordered", "key-serialized"]: + scheduler_results = [ + r for r in by_payload if r.config.get("scheduler") == scheduler + ] + if scheduler_results: + x_values = [ + r.config["payload_size"] / 1024 for r in scheduler_results + ] # KB + y_values = [ + r.throughput.requests_per_second for r in scheduler_results + ] + + fig.add_trace( + go.Scatter( + x=x_values, + y=y_values, + mode="lines+markers", + name=scheduler, + line=dict(width=2), + marker=dict(size=8), + ) + ) + + fig.update_layout( + title="Throughput by Payload Size", + xaxis_title="Payload Size (KB)", + yaxis_title="Throughput (requests/sec)", + hovermode="x unified", + height=400, + ) + + html_parts.append( + '
' + ) + html_parts.append( + f'' + ) + + # Throughput by client count + if by_client: + fig = go.Figure() + + # Group by scheduler + for scheduler in ["exit-ordered", "key-serialized"]: + scheduler_results = [ + r for r in by_client if r.config.get("scheduler") == scheduler + ] + if scheduler_results: + x_values = [r.config["num_clients"] for r in scheduler_results] + y_values = [ + r.throughput.requests_per_second for r in scheduler_results + ] + + fig.add_trace( + go.Scatter( + x=x_values, + y=y_values, + mode="lines+markers", + name=scheduler, + line=dict(width=2), + marker=dict(size=8), + ) + ) + + fig.update_layout( + title="Throughput by Client Count", + xaxis_title="Number of Concurrent Clients", + yaxis_title="Throughput (requests/sec)", + hovermode="x unified", + height=400, + ) + + html_parts.append( + '
' + ) + html_parts.append( + f'' + ) + + # Scheduler comparison + if by_scheduler: + schedulers = [r.config["scheduler"] for r in by_scheduler] + throughputs = [r.throughput.requests_per_second for r in by_scheduler] + + fig = go.Figure( + data=[ + go.Bar( + x=schedulers, + y=throughputs, + marker_color=["#3498db", "#e74c3c"], + ) + ] + ) + + fig.update_layout( + title="Scheduler Comparison", + xaxis_title="Scheduler Type", + yaxis_title="Throughput (requests/sec)", + height=400, + ) + + html_parts.append( + '
' + ) + html_parts.append( + f'' + ) + + return "\n".join(html_parts) + + def _create_latency_charts(self, results: List[BenchmarkResult]) -> str: + """Create latency visualization charts.""" + html_parts = [] + + # Latency percentiles box plot + if results: + fig = go.Figure() + + for result in results: + config_str = f"{result.config.get('payload_size', '?')}B" + if "target_load_rps" in result.config: + config_str = f"{result.config['target_load_rps']} rps" + + fig.add_trace( + go.Box( + name=config_str, + y=[ + result.latency.min_ms, + result.latency.median_ms, + result.latency.p95_ms, + result.latency.p99_ms, + result.latency.max_ms, + ], + boxmean="sd", + ) + ) + + fig.update_layout( + title="Latency Distribution", + yaxis_title="Latency (ms)", + height=400, + ) + + html_parts.append( + '
' + ) + html_parts.append( + f'' + ) + + return "\n".join(html_parts) + + def _create_feature_charts(self, results: List[BenchmarkResult]) -> str: + """Create feature overhead charts.""" + html_parts = [] + + if results: + # Create comparison bar chart + labels = [] + throughputs = [] + + for result in results: + label_parts = [] + if "compress" in result.config: + label_parts.append( + "Comp" if result.config["compress"] else "NoComp" + ) + if "use_auth" in result.config: + label_parts.append( + "Auth" if result.config["use_auth"] else "NoAuth" + ) + + labels.append( + " + ".join(label_parts) if label_parts else result.scenario_name + ) + throughputs.append(result.throughput.requests_per_second) + + fig = go.Figure( + data=[go.Bar(x=labels, y=throughputs, marker_color="#9b59b6")] + ) + + fig.update_layout( + title="Feature Overhead Impact on Throughput", + xaxis_title="Feature Configuration", + yaxis_title="Throughput (requests/sec)", + height=400, + ) + + html_parts.append( + '
' + ) + html_parts.append( + f'' + ) + + return "\n".join(html_parts) + + def _create_profiling_section(self, results: List[BenchmarkResult]) -> str: + """Create profiling results section.""" + html_parts = [] + + for result in results: + if result.profile: + html_parts.append(f"

{result.scenario_name} - CPU Profile

") + html_parts.append("") + html_parts.append( + "" + ) + + for func_name, cumtime, pct in result.profile.get_top_n(10): + html_parts.append( + f"" + ) + + html_parts.append("
FunctionTime (s)%
{func_name[:80]}{cumtime:.3f}{pct:.1f}%
") + + if result.memory: + html_parts.append(f"

{result.scenario_name} - Memory Usage

") + html_parts.append("") + html_parts.append("") + html_parts.append( + f"" + ) + html_parts.append( + f"" + ) + html_parts.append( + f"" + ) + html_parts.append("
MetricValue
Peak Memory{result.memory.peak_memory_mb:.2f} MB
Memory Increase{result.memory.memory_increase_mb:.2f} MB
Allocations{result.memory.allocations_count:,}
") + + return "\n".join(html_parts) + + def _create_raw_data_section(self, results: List[BenchmarkResult]) -> str: + """Create raw data section with downloadable JSON.""" + # Convert results to dict + results_dict = [r.to_dict() for r in results] + json_data = json.dumps(results_dict, indent=2) + + return f""" +
+

Raw Data (JSON)

+

Download raw benchmark data for further analysis:

+ +

Full data available in exported JSON file

+
+""" + + def _create_footer(self) -> str: + """Create HTML footer.""" + return """ + + + +""" diff --git a/benchmarks/scenarios/__init__.py b/benchmarks/scenarios/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/scenarios/base.py b/benchmarks/scenarios/base.py new file mode 100644 index 0000000..3fa1c12 --- /dev/null +++ b/benchmarks/scenarios/base.py @@ -0,0 +1,174 @@ +""" +Base benchmark scenario class. +""" + +import time +from abc import ABC +from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple + +from benchmarks.core.metrics import ( + BenchmarkResult, + MemoryMetric, + ProfileMetric, + RequestMetric, +) +from benchmarks.core.profiler import BenchmarkProfiler +from benchmarks.fixtures.server import BenchmarkServer +from benchmarks.utils.statistics import ( + calculate_latency_metrics, + calculate_throughput, +) + + +class BaseBenchmarkScenario(ABC): + """ + Abstract base class for benchmark scenarios. + + Provides common setup/teardown and result aggregation logic. + """ + + def __init__( + self, + scenario_name: str, + profiler: Optional[BenchmarkProfiler] = None, + ): + """ + Initialize benchmark scenario. + + Args: + scenario_name: Name of the scenario for reporting + profiler: Optional profiler for CPU/memory tracking + """ + self.scenario_name = scenario_name + self.profiler = profiler + + async def run(self, **kwargs: Any) -> BenchmarkResult: + """ + Run the benchmark scenario. + + Subclasses should override this method with scenario-specific parameters. + + Args: + **kwargs: Scenario-specific parameters + + Returns: + BenchmarkResult with metrics + """ + raise NotImplementedError("Subclasses must implement run()") + + def create_result( + self, + config: Dict[str, Any], + metrics: List[RequestMetric], + duration: float, + profile_metric: Optional[ProfileMetric] = None, + memory_metric: Optional[MemoryMetric] = None, + ) -> BenchmarkResult: + """ + Create BenchmarkResult from collected metrics. 
+ + Args: + config: Benchmark configuration dictionary + metrics: List of request metrics + duration: Total duration in seconds + profile_metric: Optional CPU profiling metric + memory_metric: Optional memory profiling metric + + Returns: + Complete BenchmarkResult + """ + # Calculate latency and throughput + latency = calculate_latency_metrics(metrics) + throughput = calculate_throughput(metrics, duration) + + return BenchmarkResult( + scenario_name=self.scenario_name, + config=config, + throughput=throughput, + latency=latency, + memory=memory_metric, + profile=profile_metric, + raw_metrics=metrics, + ) + + async def run_with_profiling( + self, + server: BenchmarkServer, + benchmark_func: Callable[..., Awaitable[List[RequestMetric]]], + **kwargs: Any, + ) -> Tuple[ + List[RequestMetric], float, Optional[ProfileMetric], Optional[MemoryMetric] + ]: + """ + Run benchmark with profiling enabled. + + Args: + server: BenchmarkServer instance + benchmark_func: Async function that runs the benchmark + **kwargs: Arguments to pass to benchmark_func + + Returns: + BenchmarkResult with profiling data + """ + if self.profiler: + async with self.profiler: + start_time = time.perf_counter() + metrics = await benchmark_func(**kwargs) + duration = time.perf_counter() - start_time + + # Get profiling results + scheduler_queue_sizes = server.get_scheduler_queue_sizes() + profile_metric, memory_metric = self.profiler.get_results( + scheduler_queue_sizes + ) + else: + start_time = time.perf_counter() + metrics = await benchmark_func(**kwargs) + duration = time.perf_counter() - start_time + profile_metric = None + memory_metric = None + + return metrics, duration, profile_metric, memory_metric + + +class MultiIterationRunner: + """ + Runs a scenario multiple times and aggregates results. + """ + + def __init__(self, iterations: int = 3, warmup_iterations: int = 1): + """ + Initialize multi-iteration runner. 
+ + Args: + iterations: Number of measurement iterations + warmup_iterations: Number of warmup iterations (not measured) + """ + self.iterations = iterations + self.warmup_iterations = warmup_iterations + + async def run_scenario( + self, scenario: BaseBenchmarkScenario, **kwargs + ) -> List[BenchmarkResult]: + """ + Run a scenario multiple times. + + Args: + scenario: Scenario to run + **kwargs: Arguments to pass to scenario.run() + + Returns: + List of BenchmarkResult from each iteration + """ + results = [] + + # Warmup iterations (not measured) + for i in range(self.warmup_iterations): + await scenario.run(**kwargs) + + # Measurement iterations + for i in range(self.iterations): + result = await scenario.run(**kwargs) + results.append(result) + + return results diff --git a/benchmarks/scenarios/features.py b/benchmarks/scenarios/features.py new file mode 100644 index 0000000..06a6686 --- /dev/null +++ b/benchmarks/scenarios/features.py @@ -0,0 +1,312 @@ +""" +Feature overhead benchmark scenarios. +""" + +from typing import List, Optional + +from benchmarks.core.config import FeatureConfig +from benchmarks.core.metrics import BenchmarkResult, RequestMetric +from benchmarks.core.profiler import BenchmarkProfiler +from benchmarks.fixtures.client import MultiClientRunner, warmup_connection +from benchmarks.fixtures.server import BenchmarkServer, SchedulerType +from benchmarks.scenarios.base import BaseBenchmarkScenario +from benchmarks.utils.auth_helpers import ( + DummyClientAuthenticator, + DummyServerAuthenticator, + setup_benchmark_auth, +) +from benchmarks.utils.payload import PayloadGenerator + + +class CompressionOverhead(BaseBenchmarkScenario): + """ + Measure performance impact of Snappy compression. 
+ """ + + def __init__( + self, + config: FeatureConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("compression-overhead", profiler) + self.config = config + + async def run( # type: ignore[override] + self, + payload_size: int, + compress: bool, + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + ) -> BenchmarkResult: + """ + Run benchmark with/without compression. + + Args: + payload_size: Payload size in bytes + compress: Enable compression + scheduler_type: Scheduler type to use + + Returns: + BenchmarkResult showing compression impact + """ + num_clients = 10 + requests_per_client = self.config.requests_per_test // num_clients + + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=compress, + authenticator=None, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection( + server_address, num_requests=100, compress=compress + ) + + # Use compressible payload for fair comparison + payload_gen = PayloadGenerator( + size_bytes=payload_size, kind="compressible" + ) + + # Run benchmark with profiling + async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=num_clients, + compress=compress, + ) + return await client.run_concurrent_clients( + requests_per_client=requests_per_client, + method="echo", + payload_generator=payload_gen, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create result + result_config = { + "payload_size": payload_size, + "compress": compress, + "num_clients": num_clients, + "requests": self.config.requests_per_test, + "scheduler": scheduler_type.value, + } + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + profile_metric=profile_metric, + memory_metric=memory_metric, + ) + + +class 
AuthenticationOverhead(BaseBenchmarkScenario): + """ + Measure performance impact of CURVE encryption. + """ + + def __init__( + self, + config: FeatureConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("authentication-overhead", profiler) + self.config = config + + async def run( # type: ignore[override] + self, + use_auth: bool, + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + ) -> BenchmarkResult: + """ + Run benchmark with/without CURVE authentication. + + Args: + use_auth: Enable CURVE authentication + scheduler_type: Scheduler type to use + + Returns: + BenchmarkResult showing auth impact + """ + num_clients = 10 + requests_per_client = self.config.requests_per_test // num_clients + payload_size = 1024 + + # Setup authentication if needed + server_auth: Optional[DummyServerAuthenticator] = None + client_auths: Optional[List[DummyClientAuthenticator]] = None + + if use_auth: + server_auth, client_auths = setup_benchmark_auth(num_clients=num_clients) + + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=False, + authenticator=server_auth, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection( + server_address, + num_requests=100, + authenticator=client_auths[0] if client_auths else None, + ) + + # Create payload generator + payload_gen = PayloadGenerator(size_bytes=payload_size, kind="random") + + # Run benchmark with profiling + async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=num_clients, + compress=False, + authenticators=client_auths, # type: ignore[arg-type] + ) + return await client.run_concurrent_clients( + requests_per_client=requests_per_client, + method="echo", + payload_generator=payload_gen, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create 
result + result_config = { + "use_auth": use_auth, + "payload_size": payload_size, + "num_clients": num_clients, + "requests": self.config.requests_per_test, + "scheduler": scheduler_type.value, + } + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + profile_metric=profile_metric, + memory_metric=memory_metric, + ) + + +class CombinedFeaturesMatrix(BaseBenchmarkScenario): + """ + Test combinations of compression and authentication. + """ + + def __init__( + self, + config: FeatureConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("combined-features-matrix", profiler) + self.config = config + + async def run( # type: ignore[override] + self, + compress: bool, + use_auth: bool, + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + ) -> BenchmarkResult: + """ + Run benchmark with specific feature combination. + + Args: + compress: Enable compression + use_auth: Enable authentication + scheduler_type: Scheduler type to use + + Returns: + BenchmarkResult for this feature combination + """ + num_clients = 10 + requests_per_client = self.config.requests_per_test // num_clients + payload_size = 10240 # 10KB + + # Setup authentication if needed + server_auth: Optional[DummyServerAuthenticator] = None + client_auths: Optional[List[DummyClientAuthenticator]] = None + + if use_auth: + server_auth, client_auths = setup_benchmark_auth(num_clients=num_clients) + + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=compress, + authenticator=server_auth, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection( + server_address, + num_requests=100, + compress=compress, + authenticator=client_auths[0] if client_auths else None, + ) + + # Create payload generator + payload_gen = PayloadGenerator( + size_bytes=payload_size, + kind="compressible" if compress else "random", + ) + + # Run benchmark with profiling 
+ async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=num_clients, + compress=compress, + authenticators=client_auths, # type: ignore[arg-type] + ) + return await client.run_concurrent_clients( + requests_per_client=requests_per_client, + method="echo", + payload_generator=payload_gen, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create result + result_config = { + "compress": compress, + "use_auth": use_auth, + "payload_size": payload_size, + "num_clients": num_clients, + "requests": self.config.requests_per_test, + "scheduler": scheduler_type.value, + } + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + profile_metric=profile_metric, + memory_metric=memory_metric, + ) diff --git a/benchmarks/scenarios/latency.py b/benchmarks/scenarios/latency.py new file mode 100644 index 0000000..4a20ff2 --- /dev/null +++ b/benchmarks/scenarios/latency.py @@ -0,0 +1,277 @@ +""" +Latency benchmark scenarios. +""" + +from typing import List, Optional + +from benchmarks.core.config import LatencyConfig +from benchmarks.core.metrics import BenchmarkResult, RequestMetric +from benchmarks.core.profiler import BenchmarkProfiler +from benchmarks.fixtures.client import MultiClientRunner, warmup_connection +from benchmarks.fixtures.server import BenchmarkServer, SchedulerType +from benchmarks.scenarios.base import BaseBenchmarkScenario +from benchmarks.utils.payload import PayloadGenerator + + +class LatencyUnderLoad(BaseBenchmarkScenario): + """ + Measure latency percentiles under different load levels. 
+ """ + + def __init__( + self, + config: LatencyConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("latency-under-load", profiler) + self.config = config + + async def run( # type: ignore[override] + self, + target_load: int, # requests per second + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + ) -> BenchmarkResult: + """ + Run latency benchmark under specific load. + + Args: + target_load: Target load in requests/second + scheduler_type: Scheduler type to use + + Returns: + BenchmarkResult with latency percentiles + """ + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=False, + authenticator=None, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection(server_address, num_requests=100) + + # Calculate clients needed to achieve target load + # Assume each client can do ~100 req/s sustainably + num_clients = max(1, target_load // 100) + total_requests = target_load * self.config.duration_seconds + requests_per_client = total_requests // num_clients + + # Rate limit per client + rate_limit_per_client = target_load / num_clients + + # Create payload generator + payload_gen = PayloadGenerator( + size_bytes=self.config.payload_size, kind="random" + ) + + # Run benchmark with profiling + async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=num_clients, + compress=False, + ) + return await client.run_concurrent_clients( + requests_per_client=requests_per_client, + method="echo", + payload_generator=payload_gen, + rate_limit=rate_limit_per_client, + timeout=10.0, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create result + result_config = { + "target_load_rps": target_load, + "duration_seconds": self.config.duration_seconds, + "payload_size": self.config.payload_size, + 
"num_clients": num_clients, + "scheduler": scheduler_type.value, + } + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + profile_metric=profile_metric, + memory_metric=memory_metric, + ) + + +class LatencyByPayloadSize(BaseBenchmarkScenario): + """ + Measure latency percentiles for different payload sizes. + """ + + def __init__( + self, + config: LatencyConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("latency-by-payload-size", profiler) + self.config = config + + async def run( # type: ignore[override] + self, + payload_size: int, + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + ) -> BenchmarkResult: + """ + Run latency benchmark for specific payload size. + + Args: + payload_size: Payload size in bytes + scheduler_type: Scheduler type to use + + Returns: + BenchmarkResult with latency metrics + """ + # Fixed concurrency + num_clients = 10 + requests_per_client = 500 + + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=False, + authenticator=None, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection(server_address, num_requests=100) + + # Create payload generator + payload_gen = PayloadGenerator(size_bytes=payload_size, kind="random") + + # Run benchmark with profiling + async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=num_clients, + compress=False, + ) + return await client.run_concurrent_clients( + requests_per_client=requests_per_client, + method="echo", + payload_generator=payload_gen, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create result + result_config = { + "payload_size": payload_size, + "num_clients": num_clients, + "requests": num_clients * requests_per_client, + "scheduler": scheduler_type.value, + 
} + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + profile_metric=profile_metric, + memory_metric=memory_metric, + ) + + +class TailLatencyAnalysis(BaseBenchmarkScenario): + """ + Long-running test to identify tail latency and outliers. + """ + + def __init__( + self, + config: LatencyConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("tail-latency-analysis", profiler) + self.config = config + + async def run( # type: ignore[override] + self, + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + ) -> BenchmarkResult: + """ + Run long tail latency analysis. + + Args: + scheduler_type: Scheduler type to use + + Returns: + BenchmarkResult with detailed latency distribution + """ + # Long-running test parameters + num_clients = 10 + total_requests = 10000 + requests_per_client = total_requests // num_clients + + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=False, + authenticator=None, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection(server_address, num_requests=100) + + # Create payload generator + payload_gen = PayloadGenerator(size_bytes=1024, kind="random") + + # Run benchmark with profiling + async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=num_clients, + compress=False, + ) + return await client.run_concurrent_clients( + requests_per_client=requests_per_client, + method="echo", + payload_generator=payload_gen, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create result + result_config = { + "total_requests": total_requests, + "num_clients": num_clients, + "payload_size": 1024, + "scheduler": scheduler_type.value, + } + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + 
profile_metric=profile_metric, + memory_metric=memory_metric, + ) diff --git a/benchmarks/scenarios/throughput.py b/benchmarks/scenarios/throughput.py new file mode 100644 index 0000000..49ffd01 --- /dev/null +++ b/benchmarks/scenarios/throughput.py @@ -0,0 +1,264 @@ +""" +Throughput benchmark scenarios. +""" + +from typing import List, Optional + +from benchmarks.core.config import ThroughputConfig +from benchmarks.core.metrics import BenchmarkResult, RequestMetric +from benchmarks.core.profiler import BenchmarkProfiler +from benchmarks.fixtures.client import MultiClientRunner, warmup_connection +from benchmarks.fixtures.server import BenchmarkServer, SchedulerType +from benchmarks.scenarios.base import BaseBenchmarkScenario +from benchmarks.utils.payload import PayloadGenerator + + +class ThroughputByPayloadSize(BaseBenchmarkScenario): + """ + Benchmark throughput with variable payload sizes. + """ + + def __init__( + self, + config: ThroughputConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("throughput-by-payload-size", profiler) + self.config = config + + async def run( # type: ignore[override] + self, + payload_size: int, + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + ) -> BenchmarkResult: + """ + Run throughput benchmark for a specific payload size. 
+ + Args: + payload_size: Payload size in bytes + scheduler_type: Scheduler type to use + + Returns: + BenchmarkResult with throughput metrics + """ + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=False, + authenticator=None, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection( + server_address, num_requests=self.config.warmup_requests + ) + + # Create payload generator + payload_gen = PayloadGenerator(size_bytes=payload_size, kind="random") + + # Run benchmark with profiling + async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=1, + compress=False, + ) + return await client.run_concurrent_clients( + requests_per_client=self.config.requests_per_test, + method="echo", + payload_generator=payload_gen, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create result + result_config = { + "payload_size": payload_size, + "num_clients": 1, + "requests": self.config.requests_per_test, + "scheduler": scheduler_type.value, + } + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + profile_metric=profile_metric, + memory_metric=memory_metric, + ) + + +class ThroughputByClientCount(BaseBenchmarkScenario): + """ + Benchmark throughput with variable client counts. + """ + + def __init__( + self, + config: ThroughputConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("throughput-by-client-count", profiler) + self.config = config + + async def run( # type: ignore[override] + self, + num_clients: int, + scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED, + ) -> BenchmarkResult: + """ + Run throughput benchmark with specific number of clients. 
+ + Args: + num_clients: Number of concurrent clients + scheduler_type: Scheduler type to use + + Returns: + BenchmarkResult with throughput metrics + """ + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=False, + authenticator=None, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection( + server_address, num_requests=self.config.warmup_requests + ) + + # Create payload generator (1KB fixed size) + payload_gen = PayloadGenerator(size_bytes=1024, kind="random") + + # Run benchmark with profiling + async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=num_clients, + compress=False, + ) + return await client.run_concurrent_clients( + requests_per_client=self.config.requests_per_test + // num_clients, # Distribute requests across clients + method="echo", + payload_generator=payload_gen, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create result + result_config = { + "payload_size": 1024, + "num_clients": num_clients, + "requests": self.config.requests_per_test, + "scheduler": scheduler_type.value, + } + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + profile_metric=profile_metric, + memory_metric=memory_metric, + ) + + +class ThroughputSchedulerComparison(BaseBenchmarkScenario): + """ + Compare throughput between different schedulers. + """ + + def __init__( + self, + config: ThroughputConfig, + profiler: Optional[BenchmarkProfiler] = None, + ): + super().__init__("throughput-scheduler-comparison", profiler) + self.config = config + + async def run( # type: ignore[override] + self, scheduler_type: SchedulerType = SchedulerType.EXIT_ORDERED + ) -> BenchmarkResult: + """ + Run throughput benchmark with specific scheduler. 
+ + Args: + scheduler_type: Scheduler type to test + + Returns: + BenchmarkResult with throughput metrics + """ + # Fixed parameters for fair comparison + num_clients = 10 + payload_size = 1024 + + # Start server + server = BenchmarkServer( + scheduler_type=scheduler_type, + compress=False, + authenticator=None, + ) + + async with server: + server_address = server.get_connect_address() + + # Warmup + await warmup_connection( + server_address, num_requests=self.config.warmup_requests + ) + + # Create payload generator + payload_gen = PayloadGenerator(size_bytes=payload_size, kind="random") + + # Run benchmark with profiling + async def run_benchmark() -> List[RequestMetric]: + client = MultiClientRunner( + server_address=server_address, + num_clients=num_clients, + compress=False, + ) + return await client.run_concurrent_clients( + requests_per_client=self.config.requests_per_test // num_clients, + method="echo", + payload_generator=payload_gen, + ) + + ( + metrics, + duration, + profile_metric, + memory_metric, + ) = await self.run_with_profiling(server, run_benchmark) + + # Create result + result_config = { + "payload_size": payload_size, + "num_clients": num_clients, + "requests": self.config.requests_per_test, + "scheduler": scheduler_type.value, + } + + return self.create_result( + config=result_config, + metrics=metrics, + duration=duration, + profile_metric=profile_metric, + memory_metric=memory_metric, + ) diff --git a/benchmarks/utils/__init__.py b/benchmarks/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/benchmarks/utils/auth_helpers.py b/benchmarks/utils/auth_helpers.py new file mode 100644 index 0000000..a677c79 --- /dev/null +++ b/benchmarks/utils/auth_helpers.py @@ -0,0 +1,150 @@ +""" +Authentication helpers for benchmark testing. 
+""" + +from typing import List, Optional, Set, Tuple + +from callosum.auth import ( + AbstractClientAuthenticator, + AbstractServerAuthenticator, + AuthResult, + Credential, + Identity, + create_keypair, +) + + +class DummyServerAuthenticator(AbstractServerAuthenticator): + """ + Test server authenticator for benchmarks. + + Accepts any client with a valid keypair from the allowed list. + """ + + allowed_public_keys: Optional[Set[bytes]] + _server_keypair: Tuple[bytes, bytes] + + def __init__(self, allowed_public_keys: Optional[List[bytes]] = None): + """ + Initialize server authenticator. + + Args: + allowed_public_keys: List of allowed client public keys. + If None, accepts all clients. + """ + self.allowed_public_keys = ( + set(allowed_public_keys) if allowed_public_keys else None + ) + self._server_keypair = create_keypair() + + def get_public_key(self) -> bytes: + """Return server's public key.""" + return self._server_keypair[0] + + def get_secret_key(self) -> bytes: + """Return server's secret key.""" + return self._server_keypair[1] + + async def server_identity(self) -> Identity: + """Return the identity of the server.""" + return Identity(domain="benchmark", private_key=self._server_keypair[1]) + + async def check_client(self, creds: Credential) -> AuthResult: + """Check if the given client credential is valid.""" + if self.allowed_public_keys is None: + # Accept all clients + return AuthResult(success=True) + + if creds.public_key in self.allowed_public_keys: + return AuthResult(success=True) + + return AuthResult(success=False) + + async def server_public_key(self) -> bytes: + """Return the public key of the server.""" + return self._server_keypair[0] + + +class DummyClientAuthenticator(AbstractClientAuthenticator): + """ + Test client authenticator for benchmarks. 
+ """ + + _server_public_key: Optional[bytes] + _client_keypair: Tuple[bytes, bytes] + + def __init__(self, server_public_key_value: Optional[bytes] = None): + """ + Initialize client authenticator. + + Args: + server_public_key_value: Server's public key. If None, trust any server. + """ + self._server_public_key = server_public_key_value + self._client_keypair = create_keypair() + + def get_public_key(self) -> bytes: + """Return client's public key.""" + return self._client_keypair[0] + + def get_secret_key(self) -> bytes: + """Return client's secret key.""" + return self._client_keypair[1] + + async def server_public_key(self) -> bytes: + """Return the public key of the server.""" + if self._server_public_key is None: + # Return empty bytes if no server key set (trust any server) + return b"" + return self._server_public_key + + async def client_identity(self) -> Identity: + """Return the identity of the client.""" + return Identity(domain="benchmark", private_key=self._client_keypair[1]) + + async def client_public_key(self) -> bytes: + """Return the public key of the client.""" + return self._client_keypair[0] + + +def create_test_keypairs(count: int) -> List[Tuple[bytes, bytes]]: + """ + Generate multiple keypairs for testing. + + Args: + count: Number of keypairs to generate + + Returns: + List of (public_key, secret_key) tuples + """ + return [create_keypair() for _ in range(count)] + + +def setup_benchmark_auth( + num_clients: int = 1, +) -> Tuple[DummyServerAuthenticator, List[DummyClientAuthenticator]]: + """ + Setup authenticators for benchmark testing. 
+ + Args: + num_clients: Number of client authenticators to create + + Returns: + Tuple of (server_authenticator, list_of_client_authenticators) + """ + # Create server authenticator + server_auth = DummyServerAuthenticator() + + # Create client authenticators + client_authenticators = [] + for _ in range(num_clients): + client_auth = DummyClientAuthenticator( + server_public_key_value=server_auth.get_public_key() + ) + client_authenticators.append(client_auth) + + # Update server to accept these clients + allowed_keys = [ca.get_public_key() for ca in client_authenticators] + server_auth.allowed_public_keys = set(allowed_keys) + + return server_auth, client_authenticators diff --git a/benchmarks/utils/payload.py b/benchmarks/utils/payload.py new file mode 100644 index 0000000..25eb6f4 --- /dev/null +++ b/benchmarks/utils/payload.py @@ -0,0 +1,114 @@ +""" +Payload generators for benchmark tests. +""" + +import os +import random +import string +from typing import Any, Dict + + +def generate_random_payload(size_bytes: int) -> Dict[str, Any]: + """ + Generate random payload of specified size. + + Returns a dictionary with random data that will be approximately + the specified size when serialized with MessagePack. + """ + # Estimate: each char is ~1 byte, plus overhead for dict structure + # We'll generate slightly less to account for overhead + estimated_string_size = max(1, size_bytes - 100) + + random_string = "".join( + random.choices(string.ascii_letters + string.digits, k=estimated_string_size) + ) + + return {"data": random_string, "size": size_bytes} + + +def generate_compressible_payload(size_bytes: int) -> Dict[str, Any]: + """ + Generate payload that compresses well (for compression tests). + + Returns a dictionary with repetitive data that Snappy can compress effectively. 
+ """ + # Use repetitive pattern that compresses well + pattern = "ABCDEFGHIJ" * (size_bytes // 10 + 1) + pattern = pattern[:size_bytes] + + return {"data": pattern, "size": size_bytes, "compressible": True} + + +def generate_incompressible_payload(size_bytes: int) -> Dict[str, Any]: + """ + Generate payload that doesn't compress well (random data). + + Uses cryptographically random bytes which are incompressible. + """ + random_bytes = os.urandom(size_bytes) + # Convert to hex string for JSON serialization + random_hex = random_bytes.hex() + + return {"data": random_hex, "size": size_bytes, "compressible": False} + + +def generate_structured_payload( + size_bytes: int, num_fields: int = 10 +) -> Dict[str, Any]: + """ + Generate structured payload with multiple fields. + + Useful for testing realistic scenarios with nested data structures. + """ + # Distribute size across fields + field_size = max(1, (size_bytes - 200) // num_fields) + + payload: Dict[str, Any] = { + "metadata": {"size": size_bytes, "fields": num_fields} + } + + for i in range(num_fields): + field_data = "".join( + random.choices(string.ascii_letters + string.digits, k=field_size) + ) + payload[f"field_{i}"] = field_data + + return payload + + +class PayloadGenerator: + """ + Callable payload generator for benchmarks. + + Usage: + gen = PayloadGenerator(size_bytes=1024, kind='random') + payload = gen() # Returns a new random payload + """ + + def __init__( + self, size_bytes: int, kind: str = "random", structured_fields: int = 10 + ): + """ + Initialize payload generator. 
+ + Args: + size_bytes: Target payload size in bytes + kind: Type of payload - 'random', 'compressible', 'incompressible', 'structured' + structured_fields: Number of fields for structured payloads + """ + self.size_bytes = size_bytes + self.kind = kind + self.structured_fields = structured_fields + + def __call__(self) -> Dict[str, Any]: + """Generate and return a payload.""" + if self.kind == "compressible": + return generate_compressible_payload(self.size_bytes) + elif self.kind == "incompressible": + return generate_incompressible_payload(self.size_bytes) + elif self.kind == "structured": + return generate_structured_payload( + self.size_bytes, self.structured_fields + ) + else: # random + return generate_random_payload(self.size_bytes) diff --git a/benchmarks/utils/statistics.py b/benchmarks/utils/statistics.py new file mode 100644 index 0000000..b985f44 --- /dev/null +++ b/benchmarks/utils/statistics.py @@ -0,0 +1,220 @@ +""" +Statistical analysis utilities for benchmark results. +""" + +import math +from typing import Dict, List, Tuple + +import numpy as np + +from benchmarks.core.metrics import LatencyMetric, RequestMetric, ThroughputMetric + + +def calculate_percentiles( + values: List[float], percentiles: List[float] = [50, 95, 99, 99.9] +) -> Dict[float, float]: + """ + Calculate percentiles using numpy. + + Args: + values: List of values to analyze + percentiles: List of percentile values (0-100) + + Returns: + Dictionary mapping percentile to value + """ + if not values: + return {p: 0.0 for p in percentiles} + + arr = np.array(values) + result = {} + for p in percentiles: + result[p] = float(np.percentile(arr, p)) + return result + + +def calculate_latency_metrics(metrics: List[RequestMetric]) -> LatencyMetric: + """ + Calculate latency statistics from request metrics. 
+ + Args: + metrics: List of request metrics + + Returns: + LatencyMetric with calculated statistics + """ + if not metrics: + return LatencyMetric( + min_ms=0.0, + max_ms=0.0, + mean_ms=0.0, + median_ms=0.0, + p95_ms=0.0, + p99_ms=0.0, + p999_ms=0.0, + stddev_ms=0.0, + ) + + latencies = [m.latency_ms for m in metrics if m.success] + + if not latencies: + return LatencyMetric( + min_ms=0.0, + max_ms=0.0, + mean_ms=0.0, + median_ms=0.0, + p95_ms=0.0, + p99_ms=0.0, + p999_ms=0.0, + stddev_ms=0.0, + ) + + arr = np.array(latencies) + percentiles = calculate_percentiles(latencies, [50, 95, 99, 99.9]) + + return LatencyMetric( + min_ms=float(np.min(arr)), + max_ms=float(np.max(arr)), + mean_ms=float(np.mean(arr)), + median_ms=percentiles[50], + p95_ms=percentiles[95], + p99_ms=percentiles[99], + p999_ms=percentiles[99.9], + stddev_ms=float(np.std(arr)), + ) + + +def calculate_throughput( + metrics: List[RequestMetric], duration: float +) -> ThroughputMetric: + """ + Calculate throughput from request metrics. + + Args: + metrics: List of request metrics + duration: Total duration in seconds + + Returns: + ThroughputMetric with calculated statistics + """ + if duration <= 0: + duration = 0.001 # Avoid division by zero + + total_requests = len(metrics) + failed_requests = sum(1 for m in metrics if not m.success) + total_bytes = sum(m.payload_size_bytes for m in metrics if m.success) + + requests_per_second = total_requests / duration + bytes_per_second = total_bytes / duration + + return ThroughputMetric( + requests_per_second=requests_per_second, + bytes_per_second=bytes_per_second, + duration_seconds=duration, + total_requests=total_requests, + failed_requests=failed_requests, + ) + + +def detect_outliers( + values: List[float], threshold: float = 3.0 +) -> Tuple[List[int], List[float]]: + """ + Identify outlier indices using standard deviation method. 
+ + Args: + values: List of values to analyze + threshold: Number of standard deviations for outlier detection + + Returns: + Tuple of (outlier_indices, outlier_values) + """ + if len(values) < 3: + return [], [] + + arr = np.array(values) + mean = np.mean(arr) + std = np.std(arr) + + if std == 0: + return [], [] + + z_scores = np.abs((arr - mean) / std) + outlier_mask = z_scores > threshold + + outlier_indices = np.where(outlier_mask)[0].tolist() + outlier_values = arr[outlier_mask].tolist() + + return outlier_indices, outlier_values + + +def calculate_confidence_interval( + values: List[float], confidence: float = 0.95 +) -> Tuple[float, float]: + """ + Calculate confidence interval for mean. + + Args: + values: List of values + confidence: Confidence level (0-1) + + Returns: + Tuple of (lower_bound, upper_bound) + """ + if len(values) < 2: + return (0.0, 0.0) + + try: + from scipy import stats + + arr = np.array(values) + mean = np.mean(arr) + std_err = np.std(arr, ddof=1) / math.sqrt(len(arr)) + + # Using t-distribution for small samples + t_val = stats.t.ppf((1 + confidence) / 2, len(arr) - 1) + margin = t_val * std_err + + return (float(mean - margin), float(mean + margin)) + except ImportError: + # Fallback if scipy is not available + arr = np.array(values) + mean = np.mean(arr) + # Use 1.96 for 95% confidence (z-score approximation) + margin = 1.96 * np.std(arr, ddof=1) / math.sqrt(len(arr)) + return (float(mean - margin), float(mean + margin)) + + +def aggregate_metrics( + metric_lists: List[List[RequestMetric]], +) -> List[RequestMetric]: + """ + Aggregate multiple runs of request metrics. + + Args: + metric_lists: List of metric lists from multiple runs + + Returns: + Flattened list of all metrics + """ + aggregated = [] + for metrics in metric_lists: + aggregated.extend(metrics) + return aggregated + + +def calculate_regression(current_value: float, baseline_value: float) -> float: + """ + Calculate performance regression as percentage. 
+ + Args: + current_value: Current benchmark value + baseline_value: Baseline benchmark value + + Returns: + Regression percentage (negative = improvement, positive = regression) + """ + if baseline_value == 0: + return 0.0 + + return ((current_value - baseline_value) / baseline_value) * 100.0 diff --git a/pyproject.toml b/pyproject.toml index 5488515..8daf7d2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -65,6 +65,14 @@ redis = [ snappy = [ "python-snappy>=0.6.1", ] +benchmark = [ + "rich>=13.9.0", + "plotly>=5.24.0", + "pandas>=2.2.0", + "numpy>=2.0.0", + "scipy>=1.11.0", + "jinja2>=3.1.0", +] [tool.setuptools] package-dir = {"" = "src"} diff --git a/uv.lock b/uv.lock index 8496f26..3a428fe 100644 --- a/uv.lock +++ b/uv.lock @@ -1,6 +1,10 @@ version = 1 revision = 3 requires-python = ">=3.11" +resolution-markers = [ + "python_full_version >= '3.12'", + "python_full_version < '3.12'", +] [[package]] name = "alabaster" @@ -73,6 +77,14 @@ dependencies = [ ] [package.optional-dependencies] +benchmark = [ + { name = "jinja2" }, + { name = "numpy" }, + { name = "pandas" }, + { name = "plotly" }, + { name = "rich" }, + { name = "scipy" }, +] docs = [ { name = "sphinx" }, { name = "sphinx-autodoc-typehints" }, @@ -113,18 +125,24 @@ dev = [ [package.metadata] requires-dist = [ { name = "attrs", specifier = ">=21.3.0" }, + { name = "jinja2", marker = "extra == 'benchmark'", specifier = ">=3.1.0" }, { name = "msgpack", specifier = ">=1.1.2" }, + { name = "numpy", marker = "extra == 'benchmark'", specifier = ">=2.0.0" }, + { name = "pandas", marker = "extra == 'benchmark'", specifier = ">=2.2.0" }, + { name = "plotly", marker = "extra == 'benchmark'", specifier = ">=5.24.0" }, { name = "python-dateutil", specifier = ">=2.8.2" }, { name = "python-snappy", marker = "extra == 'snappy'", specifier = ">=0.6.1" }, { name = "pyzmq", marker = "extra == 'zeromq'", specifier = ">=25.1.1" }, { name = "redis", marker = "extra == 'redis'", specifier = ">=4.6.0" }, + { name = 
"rich", marker = "extra == 'benchmark'", specifier = ">=13.9.0" }, + { name = "scipy", marker = "extra == 'benchmark'", specifier = ">=1.11.0" }, { name = "sphinx", marker = "extra == 'docs'", specifier = "~=4.3" }, { name = "sphinx-autodoc-typehints", marker = "extra == 'docs'" }, { name = "temporenc", specifier = ">=0.1" }, { name = "thriftpy2", marker = "extra == 'thrift'", specifier = ">=0.4.20" }, { name = "yarl", specifier = ">=1.8.2,!=1.9.0,!=1.9.1,!=1.9.2" }, ] -provides-extras = ["docs", "thrift", "zeromq", "redis", "snappy"] +provides-extras = ["docs", "thrift", "zeromq", "redis", "snappy", "benchmark"] [package.metadata.requires-dev] dev = [ @@ -668,7 +686,7 @@ name = "importlib-metadata" version = "8.7.1" source = { registry = "https://pypi.org/simple" } dependencies = [ - { name = "zipp" }, + { name = "zipp", marker = "python_full_version < '3.12'" }, ] sdist = { url = "https://files.pythonhosted.org/packages/f3/49/3b30cad09e7771a4982d9975a8cbf64f00d4a1ececb53297f1d9a7be1b10/importlib_metadata-8.7.1.tar.gz", hash = "sha256:49fef1ae6440c182052f407c8d34a68f72efc36db9ca90dc0113398f2fdde8bb", size = 57107, upload-time = "2025-12-21T10:00:19.278Z" } wheels = [ @@ -1157,6 +1175,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, ] +[[package]] +name = "narwhals" +version = "2.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/84/897fe7b6406d436ef312e57e5a1a13b4a5e7e36d1844e8d934ce8880e3d3/narwhals-2.14.0.tar.gz", hash = "sha256:98be155c3599db4d5c211e565c3190c398c87e7bf5b3cdb157dece67641946e0", size = 600648, upload-time = "2025-12-16T11:29:13.458Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/79/3e/b8ecc67e178919671695f64374a7ba916cf0adbf86efedc6054f38b5b8ae/narwhals-2.14.0-py3-none-any.whl", hash = "sha256:b56796c9a00179bd757d15282c540024e1d5c910b19b8c9944d836566c030acf", size = 430788, upload-time = "2025-12-16T11:29:11.699Z" }, +] + [[package]] name = "nh3" version = "0.3.2" @@ -1199,6 +1226,85 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/88/b2/d0896bdcdc8d28a7fc5717c305f1a861c26e18c05047949fb371034d98bd/nodeenv-1.10.0-py2.py3-none-any.whl", hash = "sha256:5bb13e3eed2923615535339b3c620e76779af4cb4c6a90deccc9e36b274d3827", size = 23438, upload-time = "2025-12-20T14:08:52.782Z" }, ] +[[package]] +name = "numpy" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a4/7a/6a3d14e205d292b738db449d0de649b373a59edb0d0b4493821d0a3e8718/numpy-2.4.0.tar.gz", hash = "sha256:6e504f7b16118198f138ef31ba24d985b124c2c469fe8467007cf30fd992f934", size = 20685720, upload-time = "2025-12-20T16:18:19.023Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/7e/7bae7cbcc2f8132271967aa03e03954fc1e48aa1f3bf32b29ca95fbef352/numpy-2.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:316b2f2584682318539f0bcaca5a496ce9ca78c88066579ebd11fd06f8e4741e", size = 16940166, upload-time = "2025-12-20T16:15:43.434Z" }, + { url = "https://files.pythonhosted.org/packages/0f/27/6c13f5b46776d6246ec884ac5817452672156a506d08a1f2abb39961930a/numpy-2.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2718c1de8504121714234b6f8241d0019450353276c88b9453c9c3d92e101db", size = 12641781, upload-time = "2025-12-20T16:15:45.701Z" }, + { url = "https://files.pythonhosted.org/packages/14/1c/83b4998d4860d15283241d9e5215f28b40ac31f497c04b12fa7f428ff370/numpy-2.4.0-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:21555da4ec4a0c942520ead42c3b0dc9477441e085c42b0fbdd6a084869a6f6b", size = 5470247, upload-time = "2025-12-20T16:15:47.943Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/08/cbce72c835d937795571b0464b52069f869c9e78b0c076d416c5269d2718/numpy-2.4.0-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:413aa561266a4be2d06cd2b9665e89d9f54c543f418773076a76adcf2af08bc7", size = 6799807, upload-time = "2025-12-20T16:15:49.795Z" }, + { url = "https://files.pythonhosted.org/packages/ff/be/2e647961cd8c980591d75cdcd9e8f647d69fbe05e2a25613dc0a2ea5fb1a/numpy-2.4.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0feafc9e03128074689183031181fac0897ff169692d8492066e949041096548", size = 14701992, upload-time = "2025-12-20T16:15:51.615Z" }, + { url = "https://files.pythonhosted.org/packages/a2/fb/e1652fb8b6fd91ce6ed429143fe2e01ce714711e03e5b762615e7b36172c/numpy-2.4.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8fdfed3deaf1928fb7667d96e0567cdf58c2b370ea2ee7e586aa383ec2cb346", size = 16646871, upload-time = "2025-12-20T16:15:54.129Z" }, + { url = "https://files.pythonhosted.org/packages/62/23/d841207e63c4322842f7cd042ae981cffe715c73376dcad8235fb31debf1/numpy-2.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e06a922a469cae9a57100864caf4f8a97a1026513793969f8ba5b63137a35d25", size = 16487190, upload-time = "2025-12-20T16:15:56.147Z" }, + { url = "https://files.pythonhosted.org/packages/bc/a0/6a842c8421ebfdec0a230e65f61e0dabda6edbef443d999d79b87c273965/numpy-2.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:927ccf5cd17c48f801f4ed43a7e5673a2724bd2171460be3e3894e6e332ef83a", size = 18580762, upload-time = "2025-12-20T16:15:58.524Z" }, + { url = "https://files.pythonhosted.org/packages/0a/d1/c79e0046641186f2134dde05e6181825b911f8bdcef31b19ddd16e232847/numpy-2.4.0-cp311-cp311-win32.whl", hash = "sha256:882567b7ae57c1b1a0250208cc21a7976d8cbcc49d5a322e607e6f09c9e0bd53", size = 6233359, upload-time = "2025-12-20T16:16:00.938Z" }, + { url = 
"https://files.pythonhosted.org/packages/fc/f0/74965001d231f28184d6305b8cdc1b6fcd4bf23033f6cb039cfe76c9fca7/numpy-2.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:8b986403023c8f3bf8f487c2e6186afda156174d31c175f747d8934dfddf3479", size = 12601132, upload-time = "2025-12-20T16:16:02.484Z" }, + { url = "https://files.pythonhosted.org/packages/65/32/55408d0f46dfebce38017f5bd931affa7256ad6beac1a92a012e1fbc67a7/numpy-2.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:3f3096405acc48887458bbf9f6814d43785ac7ba2a57ea6442b581dedbc60ce6", size = 10573977, upload-time = "2025-12-20T16:16:04.77Z" }, + { url = "https://files.pythonhosted.org/packages/8b/ff/f6400ffec95de41c74b8e73df32e3fff1830633193a7b1e409be7fb1bb8c/numpy-2.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2a8b6bb8369abefb8bd1801b054ad50e02b3275c8614dc6e5b0373c305291037", size = 16653117, upload-time = "2025-12-20T16:16:06.709Z" }, + { url = "https://files.pythonhosted.org/packages/fd/28/6c23e97450035072e8d830a3c411bf1abd1f42c611ff9d29e3d8f55c6252/numpy-2.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2e284ca13d5a8367e43734148622caf0b261b275673823593e3e3634a6490f83", size = 12369711, upload-time = "2025-12-20T16:16:08.758Z" }, + { url = "https://files.pythonhosted.org/packages/bc/af/acbef97b630ab1bb45e6a7d01d1452e4251aa88ce680ac36e56c272120ec/numpy-2.4.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:49ff32b09f5aa0cd30a20c2b39db3e669c845589f2b7fc910365210887e39344", size = 5198355, upload-time = "2025-12-20T16:16:10.902Z" }, + { url = "https://files.pythonhosted.org/packages/c1/c8/4e0d436b66b826f2e53330adaa6311f5cac9871a5b5c31ad773b27f25a74/numpy-2.4.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:36cbfb13c152b1c7c184ddac43765db8ad672567e7bafff2cc755a09917ed2e6", size = 6545298, upload-time = "2025-12-20T16:16:12.607Z" }, + { url = 
"https://files.pythonhosted.org/packages/ef/27/e1f5d144ab54eac34875e79037011d511ac57b21b220063310cb96c80fbc/numpy-2.4.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35ddc8f4914466e6fc954c76527aa91aa763682a4f6d73249ef20b418fe6effb", size = 14398387, upload-time = "2025-12-20T16:16:14.257Z" }, + { url = "https://files.pythonhosted.org/packages/67/64/4cb909dd5ab09a9a5d086eff9586e69e827b88a5585517386879474f4cf7/numpy-2.4.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc578891de1db95b2a35001b695451767b580bb45753717498213c5ff3c41d63", size = 16363091, upload-time = "2025-12-20T16:16:17.32Z" }, + { url = "https://files.pythonhosted.org/packages/9d/9c/8efe24577523ec6809261859737cf117b0eb6fdb655abdfdc81b2e468ce4/numpy-2.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98e81648e0b36e325ab67e46b5400a7a6d4a22b8a7c8e8bbfe20e7db7906bf95", size = 16176394, upload-time = "2025-12-20T16:16:19.524Z" }, + { url = "https://files.pythonhosted.org/packages/61/f0/1687441ece7b47a62e45a1f82015352c240765c707928edd8aef875d5951/numpy-2.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d57b5046c120561ba8fa8e4030fbb8b822f3063910fa901ffadf16e2b7128ad6", size = 18287378, upload-time = "2025-12-20T16:16:22.866Z" }, + { url = "https://files.pythonhosted.org/packages/d3/6f/f868765d44e6fc466467ed810ba9d8d6db1add7d4a748abfa2a4c99a3194/numpy-2.4.0-cp312-cp312-win32.whl", hash = "sha256:92190db305a6f48734d3982f2c60fa30d6b5ee9bff10f2887b930d7b40119f4c", size = 5955432, upload-time = "2025-12-20T16:16:25.06Z" }, + { url = "https://files.pythonhosted.org/packages/d4/b5/94c1e79fcbab38d1ca15e13777477b2914dd2d559b410f96949d6637b085/numpy-2.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:680060061adb2d74ce352628cb798cfdec399068aa7f07ba9fb818b2b3305f98", size = 12306201, upload-time = "2025-12-20T16:16:26.979Z" }, + { url = 
"https://files.pythonhosted.org/packages/70/09/c39dadf0b13bb0768cd29d6a3aaff1fb7c6905ac40e9aaeca26b1c086e06/numpy-2.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:39699233bc72dd482da1415dcb06076e32f60eddc796a796c5fb6c5efce94667", size = 10308234, upload-time = "2025-12-20T16:16:29.417Z" }, + { url = "https://files.pythonhosted.org/packages/a7/0d/853fd96372eda07c824d24adf02e8bc92bb3731b43a9b2a39161c3667cc4/numpy-2.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a152d86a3ae00ba5f47b3acf3b827509fd0b6cb7d3259665e63dafbad22a75ea", size = 16649088, upload-time = "2025-12-20T16:16:31.421Z" }, + { url = "https://files.pythonhosted.org/packages/e3/37/cc636f1f2a9f585434e20a3e6e63422f70bfe4f7f6698e941db52ea1ac9a/numpy-2.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:39b19251dec4de8ff8496cd0806cbe27bf0684f765abb1f4809554de93785f2d", size = 12364065, upload-time = "2025-12-20T16:16:33.491Z" }, + { url = "https://files.pythonhosted.org/packages/ed/69/0b78f37ca3690969beee54103ce5f6021709134e8020767e93ba691a72f1/numpy-2.4.0-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:009bd0ea12d3c784b6639a8457537016ce5172109e585338e11334f6a7bb88ee", size = 5192640, upload-time = "2025-12-20T16:16:35.636Z" }, + { url = "https://files.pythonhosted.org/packages/1d/2a/08569f8252abf590294dbb09a430543ec8f8cc710383abfb3e75cc73aeda/numpy-2.4.0-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:5fe44e277225fd3dff6882d86d3d447205d43532c3627313d17e754fb3905a0e", size = 6541556, upload-time = "2025-12-20T16:16:37.276Z" }, + { url = "https://files.pythonhosted.org/packages/93/e9/a949885a4e177493d61519377952186b6cbfdf1d6002764c664ba28349b5/numpy-2.4.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f935c4493eda9069851058fa0d9e39dbf6286be690066509305e52912714dbb2", size = 14396562, upload-time = "2025-12-20T16:16:38.953Z" }, + { url = 
"https://files.pythonhosted.org/packages/99/98/9d4ad53b0e9ef901c2ef1d550d2136f5ac42d3fd2988390a6def32e23e48/numpy-2.4.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8cfa5f29a695cb7438965e6c3e8d06e0416060cf0d709c1b1c1653a939bf5c2a", size = 16351719, upload-time = "2025-12-20T16:16:41.503Z" }, + { url = "https://files.pythonhosted.org/packages/28/de/5f3711a38341d6e8dd619f6353251a0cdd07f3d6d101a8fd46f4ef87f895/numpy-2.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba0cb30acd3ef11c94dc27fbfba68940652492bc107075e7ffe23057f9425681", size = 16176053, upload-time = "2025-12-20T16:16:44.552Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5b/2a3753dc43916501b4183532e7ace862e13211042bceafa253afb5c71272/numpy-2.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:60e8c196cd82cbbd4f130b5290007e13e6de3eca79f0d4d38014769d96a7c475", size = 18277859, upload-time = "2025-12-20T16:16:47.174Z" }, + { url = "https://files.pythonhosted.org/packages/2c/c5/a18bcdd07a941db3076ef489d036ab16d2bfc2eae0cf27e5a26e29189434/numpy-2.4.0-cp313-cp313-win32.whl", hash = "sha256:5f48cb3e88fbc294dc90e215d86fbaf1c852c63dbdb6c3a3e63f45c4b57f7344", size = 5953849, upload-time = "2025-12-20T16:16:49.554Z" }, + { url = "https://files.pythonhosted.org/packages/4f/f1/719010ff8061da6e8a26e1980cf090412d4f5f8060b31f0c45d77dd67a01/numpy-2.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:a899699294f28f7be8992853c0c60741f16ff199205e2e6cdca155762cbaa59d", size = 12302840, upload-time = "2025-12-20T16:16:51.227Z" }, + { url = "https://files.pythonhosted.org/packages/f5/5a/b3d259083ed8b4d335270c76966cb6cf14a5d1b69e1a608994ac57a659e6/numpy-2.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:9198f447e1dc5647d07c9a6bbe2063cc0132728cc7175b39dbc796da5b54920d", size = 10308509, upload-time = "2025-12-20T16:16:53.313Z" }, + { url = 
"https://files.pythonhosted.org/packages/31/01/95edcffd1bb6c0633df4e808130545c4f07383ab629ac7e316fb44fff677/numpy-2.4.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74623f2ab5cc3f7c886add4f735d1031a1d2be4a4ae63c0546cfd74e7a31ddf6", size = 12491815, upload-time = "2025-12-20T16:16:55.496Z" }, + { url = "https://files.pythonhosted.org/packages/59/ea/5644b8baa92cc1c7163b4b4458c8679852733fa74ca49c942cfa82ded4e0/numpy-2.4.0-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:0804a8e4ab070d1d35496e65ffd3cf8114c136a2b81f61dfab0de4b218aacfd5", size = 5320321, upload-time = "2025-12-20T16:16:57.468Z" }, + { url = "https://files.pythonhosted.org/packages/26/4e/e10938106d70bc21319bd6a86ae726da37edc802ce35a3a71ecdf1fdfe7f/numpy-2.4.0-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:02a2038eb27f9443a8b266a66911e926566b5a6ffd1a689b588f7f35b81e7dc3", size = 6641635, upload-time = "2025-12-20T16:16:59.379Z" }, + { url = "https://files.pythonhosted.org/packages/b3/8d/a8828e3eaf5c0b4ab116924df82f24ce3416fa38d0674d8f708ddc6c8aac/numpy-2.4.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1889b3a3f47a7b5bee16bc25a2145bd7cb91897f815ce3499db64c7458b6d91d", size = 14456053, upload-time = "2025-12-20T16:17:01.768Z" }, + { url = "https://files.pythonhosted.org/packages/68/a1/17d97609d87d4520aa5ae2dcfb32305654550ac6a35effb946d303e594ce/numpy-2.4.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85eef4cb5625c47ee6425c58a3502555e10f45ee973da878ac8248ad58c136f3", size = 16401702, upload-time = "2025-12-20T16:17:04.235Z" }, + { url = "https://files.pythonhosted.org/packages/18/32/0f13c1b2d22bea1118356b8b963195446f3af124ed7a5adfa8fdecb1b6ca/numpy-2.4.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6dc8b7e2f4eb184b37655195f421836cfae6f58197b67e3ffc501f1333d993fa", size = 16242493, upload-time = "2025-12-20T16:17:06.856Z" }, + { url = 
"https://files.pythonhosted.org/packages/ae/23/48f21e3d309fbc137c068a1475358cbd3a901b3987dcfc97a029ab3068e2/numpy-2.4.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:44aba2f0cafd287871a495fb3163408b0bd25bbce135c6f621534a07f4f7875c", size = 18324222, upload-time = "2025-12-20T16:17:09.392Z" }, + { url = "https://files.pythonhosted.org/packages/ac/52/41f3d71296a3dcaa4f456aaa3c6fc8e745b43d0552b6bde56571bb4b4a0f/numpy-2.4.0-cp313-cp313t-win32.whl", hash = "sha256:20c115517513831860c573996e395707aa9fb691eb179200125c250e895fcd93", size = 6076216, upload-time = "2025-12-20T16:17:11.437Z" }, + { url = "https://files.pythonhosted.org/packages/35/ff/46fbfe60ab0710d2a2b16995f708750307d30eccbb4c38371ea9e986866e/numpy-2.4.0-cp313-cp313t-win_amd64.whl", hash = "sha256:b48e35f4ab6f6a7597c46e301126ceba4c44cd3280e3750f85db48b082624fa4", size = 12444263, upload-time = "2025-12-20T16:17:13.182Z" }, + { url = "https://files.pythonhosted.org/packages/a3/e3/9189ab319c01d2ed556c932ccf55064c5d75bb5850d1df7a482ce0badead/numpy-2.4.0-cp313-cp313t-win_arm64.whl", hash = "sha256:4d1cfce39e511069b11e67cd0bd78ceff31443b7c9e5c04db73c7a19f572967c", size = 10378265, upload-time = "2025-12-20T16:17:15.211Z" }, + { url = "https://files.pythonhosted.org/packages/ab/ed/52eac27de39d5e5a6c9aadabe672bc06f55e24a3d9010cd1183948055d76/numpy-2.4.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:c95eb6db2884917d86cde0b4d4cf31adf485c8ec36bf8696dd66fa70de96f36b", size = 16647476, upload-time = "2025-12-20T16:17:17.671Z" }, + { url = "https://files.pythonhosted.org/packages/77/c0/990ce1b7fcd4e09aeaa574e2a0a839589e4b08b2ca68070f1acb1fea6736/numpy-2.4.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:65167da969cd1ec3a1df31cb221ca3a19a8aaa25370ecb17d428415e93c1935e", size = 12374563, upload-time = "2025-12-20T16:17:20.216Z" }, + { url = "https://files.pythonhosted.org/packages/37/7c/8c5e389c6ae8f5fd2277a988600d79e9625db3fff011a2d87ac80b881a4c/numpy-2.4.0-cp314-cp314-macosx_14_0_arm64.whl", hash = 
"sha256:3de19cfecd1465d0dcf8a5b5ea8b3155b42ed0b639dba4b71e323d74f2a3be5e", size = 5203107, upload-time = "2025-12-20T16:17:22.47Z" }, + { url = "https://files.pythonhosted.org/packages/e6/94/ca5b3bd6a8a70a5eec9a0b8dd7f980c1eff4b8a54970a9a7fef248ef564f/numpy-2.4.0-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:6c05483c3136ac4c91b4e81903cb53a8707d316f488124d0398499a4f8e8ef51", size = 6538067, upload-time = "2025-12-20T16:17:24.001Z" }, + { url = "https://files.pythonhosted.org/packages/79/43/993eb7bb5be6761dde2b3a3a594d689cec83398e3f58f4758010f3b85727/numpy-2.4.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:36667db4d6c1cea79c8930ab72fadfb4060feb4bfe724141cd4bd064d2e5f8ce", size = 14411926, upload-time = "2025-12-20T16:17:25.822Z" }, + { url = "https://files.pythonhosted.org/packages/03/75/d4c43b61de473912496317a854dac54f1efec3eeb158438da6884b70bb90/numpy-2.4.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9a818668b674047fd88c4cddada7ab8f1c298812783e8328e956b78dc4807f9f", size = 16354295, upload-time = "2025-12-20T16:17:28.308Z" }, + { url = "https://files.pythonhosted.org/packages/b8/0a/b54615b47ee8736a6461a4bb6749128dd3435c5a759d5663f11f0e9af4ac/numpy-2.4.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:1ee32359fb7543b7b7bd0b2f46294db27e29e7bbdf70541e81b190836cd83ded", size = 16190242, upload-time = "2025-12-20T16:17:30.993Z" }, + { url = "https://files.pythonhosted.org/packages/98/ce/ea207769aacad6246525ec6c6bbd66a2bf56c72443dc10e2f90feed29290/numpy-2.4.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e493962256a38f58283de033d8af176c5c91c084ea30f15834f7545451c42059", size = 18280875, upload-time = "2025-12-20T16:17:33.327Z" }, + { url = "https://files.pythonhosted.org/packages/17/ef/ec409437aa962ea372ed601c519a2b141701683ff028f894b7466f0ab42b/numpy-2.4.0-cp314-cp314-win32.whl", hash = "sha256:6bbaebf0d11567fa8926215ae731e1d58e6ec28a8a25235b8a47405d301332db", size = 6002530, 
upload-time = "2025-12-20T16:17:35.729Z" }, + { url = "https://files.pythonhosted.org/packages/5f/4a/5cb94c787a3ed1ac65e1271b968686521169a7b3ec0b6544bb3ca32960b0/numpy-2.4.0-cp314-cp314-win_amd64.whl", hash = "sha256:3d857f55e7fdf7c38ab96c4558c95b97d1c685be6b05c249f5fdafcbd6f9899e", size = 12435890, upload-time = "2025-12-20T16:17:37.599Z" }, + { url = "https://files.pythonhosted.org/packages/48/a0/04b89db963af9de1104975e2544f30de89adbf75b9e75f7dd2599be12c79/numpy-2.4.0-cp314-cp314-win_arm64.whl", hash = "sha256:bb50ce5fb202a26fd5404620e7ef820ad1ab3558b444cb0b55beb7ef66cd2d63", size = 10591892, upload-time = "2025-12-20T16:17:39.649Z" }, + { url = "https://files.pythonhosted.org/packages/53/e5/d74b5ccf6712c06c7a545025a6a71bfa03bdc7e0568b405b0d655232fd92/numpy-2.4.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:355354388cba60f2132df297e2d53053d4063f79077b67b481d21276d61fc4df", size = 12494312, upload-time = "2025-12-20T16:17:41.714Z" }, + { url = "https://files.pythonhosted.org/packages/c2/08/3ca9cc2ddf54dfee7ae9a6479c071092a228c68aef08252aa08dac2af002/numpy-2.4.0-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:1d8f9fde5f6dc1b6fc34df8162f3b3079365468703fee7f31d4e0cc8c63baed9", size = 5322862, upload-time = "2025-12-20T16:17:44.145Z" }, + { url = "https://files.pythonhosted.org/packages/87/74/0bb63a68394c0c1e52670cfff2e309afa41edbe11b3327d9af29e4383f34/numpy-2.4.0-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:e0434aa22c821f44eeb4c650b81c7fbdd8c0122c6c4b5a576a76d5a35625ecd9", size = 6644986, upload-time = "2025-12-20T16:17:46.203Z" }, + { url = "https://files.pythonhosted.org/packages/06/8f/9264d9bdbcf8236af2823623fe2f3981d740fc3461e2787e231d97c38c28/numpy-2.4.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:40483b2f2d3ba7aad426443767ff5632ec3156ef09742b96913787d13c336471", size = 14457958, upload-time = "2025-12-20T16:17:48.017Z" }, + { url = 
"https://files.pythonhosted.org/packages/8c/d9/f9a69ae564bbc7236a35aa883319364ef5fd41f72aa320cc1cbe66148fe2/numpy-2.4.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d9e6a7664ddd9746e20b7325351fe1a8408d0a2bf9c63b5e898290ddc8f09544", size = 16398394, upload-time = "2025-12-20T16:17:50.409Z" }, + { url = "https://files.pythonhosted.org/packages/34/c7/39241501408dde7f885d241a98caba5421061a2c6d2b2197ac5e3aa842d8/numpy-2.4.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ecb0019d44f4cdb50b676c5d0cb4b1eae8e15d1ed3d3e6639f986fc92b2ec52c", size = 16241044, upload-time = "2025-12-20T16:17:52.661Z" }, + { url = "https://files.pythonhosted.org/packages/7c/95/cae7effd90e065a95e59fe710eeee05d7328ed169776dfdd9f789e032125/numpy-2.4.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d0ffd9e2e4441c96a9c91ec1783285d80bf835b677853fc2770a89d50c1e48ac", size = 18321772, upload-time = "2025-12-20T16:17:54.947Z" }, + { url = "https://files.pythonhosted.org/packages/96/df/3c6c279accd2bfb968a76298e5b276310bd55d243df4fa8ac5816d79347d/numpy-2.4.0-cp314-cp314t-win32.whl", hash = "sha256:77f0d13fa87036d7553bf81f0e1fe3ce68d14c9976c9851744e4d3e91127e95f", size = 6148320, upload-time = "2025-12-20T16:17:57.249Z" }, + { url = "https://files.pythonhosted.org/packages/92/8d/f23033cce252e7a75cae853d17f582e86534c46404dea1c8ee094a9d6d84/numpy-2.4.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b1f5b45829ac1848893f0ddf5cb326110604d6df96cdc255b0bf9edd154104d4", size = 12623460, upload-time = "2025-12-20T16:17:58.963Z" }, + { url = "https://files.pythonhosted.org/packages/a4/4f/1f8475907d1a7c4ef9020edf7f39ea2422ec896849245f00688e4b268a71/numpy-2.4.0-cp314-cp314t-win_arm64.whl", hash = "sha256:23a3e9d1a6f360267e8fbb38ba5db355a6a7e9be71d7fce7ab3125e88bb646c8", size = 10661799, upload-time = "2025-12-20T16:18:01.078Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/ef/088e7c7342f300aaf3ee5f2c821c4b9996a1bef2aaf6a49cc8ab4883758e/numpy-2.4.0-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:b54c83f1c0c0f1d748dca0af516062b8829d53d1f0c402be24b4257a9c48ada6", size = 16819003, upload-time = "2025-12-20T16:18:03.41Z" }, + { url = "https://files.pythonhosted.org/packages/ff/ce/a53017b5443b4b84517182d463fc7bcc2adb4faa8b20813f8e5f5aeb5faa/numpy-2.4.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:aabb081ca0ec5d39591fc33018cd4b3f96e1a2dd6756282029986d00a785fba4", size = 12567105, upload-time = "2025-12-20T16:18:05.594Z" }, + { url = "https://files.pythonhosted.org/packages/77/58/5ff91b161f2ec650c88a626c3905d938c89aaadabd0431e6d9c1330c83e2/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_arm64.whl", hash = "sha256:8eafe7c36c8430b7794edeab3087dec7bf31d634d92f2af9949434b9d1964cba", size = 5395590, upload-time = "2025-12-20T16:18:08.031Z" }, + { url = "https://files.pythonhosted.org/packages/1d/4e/f1a084106df8c2df8132fc437e56987308e0524836aa7733721c8429d4fe/numpy-2.4.0-pp311-pypy311_pp73-macosx_14_0_x86_64.whl", hash = "sha256:2f585f52b2baf07ff3356158d9268ea095e221371f1074fadea2f42544d58b4d", size = 6709947, upload-time = "2025-12-20T16:18:09.836Z" }, + { url = "https://files.pythonhosted.org/packages/63/09/3d8aeb809c0332c3f642da812ac2e3d74fc9252b3021f8c30c82e99e3f3d/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:32ed06d0fe9cae27d8fb5f400c63ccee72370599c75e683a6358dd3a4fb50aaf", size = 14535119, upload-time = "2025-12-20T16:18:12.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/7f/68f0fc43a2cbdc6bb239160c754d87c922f60fbaa0fa3cd3d312b8a7f5ee/numpy-2.4.0-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:57c540ed8fb1f05cb997c6761cd56db72395b0d6985e90571ff660452ade4f98", size = 16475815, upload-time = "2025-12-20T16:18:14.433Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/73/edeacba3167b1ca66d51b1a5a14697c2c40098b5ffa01811c67b1785a5ab/numpy-2.4.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:a39fb973a726e63223287adc6dafe444ce75af952d711e400f3bf2b36ef55a7b", size = 12489376, upload-time = "2025-12-20T16:18:16.524Z" }, +] + [[package]] name = "packaging" version = "25.0" @@ -1208,6 +1314,60 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, ] +[[package]] +name = "pandas" +version = "2.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "python-dateutil" }, + { name = "pytz" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/33/01/d40b85317f86cf08d853a4f495195c73815fdf205eef3993821720274518/pandas-2.3.3.tar.gz", hash = "sha256:e05e1af93b977f7eafa636d043f9f94c7ee3ac81af99c13508215942e64c993b", size = 4495223, upload-time = "2025-09-29T23:34:51.853Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/fa/7ac648108144a095b4fb6aa3de1954689f7af60a14cf25583f4960ecb878/pandas-2.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:602b8615ebcc4a0c1751e71840428ddebeb142ec02c786e8ad6b1ce3c8dec523", size = 11578790, upload-time = "2025-09-29T23:18:30.065Z" }, + { url = "https://files.pythonhosted.org/packages/9b/35/74442388c6cf008882d4d4bdfc4109be87e9b8b7ccd097ad1e7f006e2e95/pandas-2.3.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8fe25fc7b623b0ef6b5009149627e34d2a4657e880948ec3c840e9402e5c1b45", size = 10833831, upload-time = "2025-09-29T23:38:56.071Z" }, + { url = 
"https://files.pythonhosted.org/packages/fe/e4/de154cbfeee13383ad58d23017da99390b91d73f8c11856f2095e813201b/pandas-2.3.3-cp311-cp311-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b468d3dad6ff947df92dcb32ede5b7bd41a9b3cceef0a30ed925f6d01fb8fa66", size = 12199267, upload-time = "2025-09-29T23:18:41.627Z" }, + { url = "https://files.pythonhosted.org/packages/bf/c9/63f8d545568d9ab91476b1818b4741f521646cbdd151c6efebf40d6de6f7/pandas-2.3.3-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b98560e98cb334799c0b07ca7967ac361a47326e9b4e5a7dfb5ab2b1c9d35a1b", size = 12789281, upload-time = "2025-09-29T23:18:56.834Z" }, + { url = "https://files.pythonhosted.org/packages/f2/00/a5ac8c7a0e67fd1a6059e40aa08fa1c52cc00709077d2300e210c3ce0322/pandas-2.3.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1d37b5848ba49824e5c30bedb9c830ab9b7751fd049bc7914533e01c65f79791", size = 13240453, upload-time = "2025-09-29T23:19:09.247Z" }, + { url = "https://files.pythonhosted.org/packages/27/4d/5c23a5bc7bd209231618dd9e606ce076272c9bc4f12023a70e03a86b4067/pandas-2.3.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:db4301b2d1f926ae677a751eb2bd0e8c5f5319c9cb3f88b0becbbb0b07b34151", size = 13890361, upload-time = "2025-09-29T23:19:25.342Z" }, + { url = "https://files.pythonhosted.org/packages/8e/59/712db1d7040520de7a4965df15b774348980e6df45c129b8c64d0dbe74ef/pandas-2.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:f086f6fe114e19d92014a1966f43a3e62285109afe874f067f5abbdcbb10e59c", size = 11348702, upload-time = "2025-09-29T23:19:38.296Z" }, + { url = "https://files.pythonhosted.org/packages/9c/fb/231d89e8637c808b997d172b18e9d4a4bc7bf31296196c260526055d1ea0/pandas-2.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d21f6d74eb1725c2efaa71a2bfc661a0689579b58e9c0ca58a739ff0b002b53", size = 11597846, upload-time = "2025-09-29T23:19:48.856Z" }, + { url = 
"https://files.pythonhosted.org/packages/5c/bd/bf8064d9cfa214294356c2d6702b716d3cf3bb24be59287a6a21e24cae6b/pandas-2.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3fd2f887589c7aa868e02632612ba39acb0b8948faf5cc58f0850e165bd46f35", size = 10729618, upload-time = "2025-09-29T23:39:08.659Z" }, + { url = "https://files.pythonhosted.org/packages/57/56/cf2dbe1a3f5271370669475ead12ce77c61726ffd19a35546e31aa8edf4e/pandas-2.3.3-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ecaf1e12bdc03c86ad4a7ea848d66c685cb6851d807a26aa245ca3d2017a1908", size = 11737212, upload-time = "2025-09-29T23:19:59.765Z" }, + { url = "https://files.pythonhosted.org/packages/e5/63/cd7d615331b328e287d8233ba9fdf191a9c2d11b6af0c7a59cfcec23de68/pandas-2.3.3-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b3d11d2fda7eb164ef27ffc14b4fcab16a80e1ce67e9f57e19ec0afaf715ba89", size = 12362693, upload-time = "2025-09-29T23:20:14.098Z" }, + { url = "https://files.pythonhosted.org/packages/a6/de/8b1895b107277d52f2b42d3a6806e69cfef0d5cf1d0ba343470b9d8e0a04/pandas-2.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a68e15f780eddf2b07d242e17a04aa187a7ee12b40b930bfdd78070556550e98", size = 12771002, upload-time = "2025-09-29T23:20:26.76Z" }, + { url = "https://files.pythonhosted.org/packages/87/21/84072af3187a677c5893b170ba2c8fbe450a6ff911234916da889b698220/pandas-2.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:371a4ab48e950033bcf52b6527eccb564f52dc826c02afd9a1bc0ab731bba084", size = 13450971, upload-time = "2025-09-29T23:20:41.344Z" }, + { url = "https://files.pythonhosted.org/packages/86/41/585a168330ff063014880a80d744219dbf1dd7a1c706e75ab3425a987384/pandas-2.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:a16dcec078a01eeef8ee61bf64074b4e524a2a3f4b3be9326420cabe59c4778b", size = 10992722, upload-time = "2025-09-29T23:20:54.139Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/4b/18b035ee18f97c1040d94debd8f2e737000ad70ccc8f5513f4eefad75f4b/pandas-2.3.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:56851a737e3470de7fa88e6131f41281ed440d29a9268dcbf0002da5ac366713", size = 11544671, upload-time = "2025-09-29T23:21:05.024Z" }, + { url = "https://files.pythonhosted.org/packages/31/94/72fac03573102779920099bcac1c3b05975c2cb5f01eac609faf34bed1ca/pandas-2.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:bdcd9d1167f4885211e401b3036c0c8d9e274eee67ea8d0758a256d60704cfe8", size = 10680807, upload-time = "2025-09-29T23:21:15.979Z" }, + { url = "https://files.pythonhosted.org/packages/16/87/9472cf4a487d848476865321de18cc8c920b8cab98453ab79dbbc98db63a/pandas-2.3.3-cp313-cp313-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e32e7cc9af0f1cc15548288a51a3b681cc2a219faa838e995f7dc53dbab1062d", size = 11709872, upload-time = "2025-09-29T23:21:27.165Z" }, + { url = "https://files.pythonhosted.org/packages/15/07/284f757f63f8a8d69ed4472bfd85122bd086e637bf4ed09de572d575a693/pandas-2.3.3-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:318d77e0e42a628c04dc56bcef4b40de67918f7041c2b061af1da41dcff670ac", size = 12306371, upload-time = "2025-09-29T23:21:40.532Z" }, + { url = "https://files.pythonhosted.org/packages/33/81/a3afc88fca4aa925804a27d2676d22dcd2031c2ebe08aabd0ae55b9ff282/pandas-2.3.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4e0a175408804d566144e170d0476b15d78458795bb18f1304fb94160cabf40c", size = 12765333, upload-time = "2025-09-29T23:21:55.77Z" }, + { url = "https://files.pythonhosted.org/packages/8d/0f/b4d4ae743a83742f1153464cf1a8ecfafc3ac59722a0b5c8602310cb7158/pandas-2.3.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2d9ab0fc11822b5eece72ec9587e172f63cff87c00b062f6e37448ced4493", size = 13418120, upload-time = "2025-09-29T23:22:10.109Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/c7/e54682c96a895d0c808453269e0b5928a07a127a15704fedb643e9b0a4c8/pandas-2.3.3-cp313-cp313-win_amd64.whl", hash = "sha256:f8bfc0e12dc78f777f323f55c58649591b2cd0c43534e8355c51d3fede5f4dee", size = 10993991, upload-time = "2025-09-29T23:25:04.889Z" }, + { url = "https://files.pythonhosted.org/packages/f9/ca/3f8d4f49740799189e1395812f3bf23b5e8fc7c190827d55a610da72ce55/pandas-2.3.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:75ea25f9529fdec2d2e93a42c523962261e567d250b0013b16210e1d40d7c2e5", size = 12048227, upload-time = "2025-09-29T23:22:24.343Z" }, + { url = "https://files.pythonhosted.org/packages/0e/5a/f43efec3e8c0cc92c4663ccad372dbdff72b60bdb56b2749f04aa1d07d7e/pandas-2.3.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74ecdf1d301e812db96a465a525952f4dde225fdb6d8e5a521d47e1f42041e21", size = 11411056, upload-time = "2025-09-29T23:22:37.762Z" }, + { url = "https://files.pythonhosted.org/packages/46/b1/85331edfc591208c9d1a63a06baa67b21d332e63b7a591a5ba42a10bb507/pandas-2.3.3-cp313-cp313t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6435cb949cb34ec11cc9860246ccb2fdc9ecd742c12d3304989017d53f039a78", size = 11645189, upload-time = "2025-09-29T23:22:51.688Z" }, + { url = "https://files.pythonhosted.org/packages/44/23/78d645adc35d94d1ac4f2a3c4112ab6f5b8999f4898b8cdf01252f8df4a9/pandas-2.3.3-cp313-cp313t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:900f47d8f20860de523a1ac881c4c36d65efcb2eb850e6948140fa781736e110", size = 12121912, upload-time = "2025-09-29T23:23:05.042Z" }, + { url = "https://files.pythonhosted.org/packages/53/da/d10013df5e6aaef6b425aa0c32e1fc1f3e431e4bcabd420517dceadce354/pandas-2.3.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a45c765238e2ed7d7c608fc5bc4a6f88b642f2f01e70c0c23d2224dd21829d86", size = 12712160, upload-time = "2025-09-29T23:23:28.57Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/17/e756653095a083d8a37cbd816cb87148debcfcd920129b25f99dd8d04271/pandas-2.3.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:c4fc4c21971a1a9f4bdb4c73978c7f7256caa3e62b323f70d6cb80db583350bc", size = 13199233, upload-time = "2025-09-29T23:24:24.876Z" }, + { url = "https://files.pythonhosted.org/packages/04/fd/74903979833db8390b73b3a8a7d30d146d710bd32703724dd9083950386f/pandas-2.3.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:ee15f284898e7b246df8087fc82b87b01686f98ee67d85a17b7ab44143a3a9a0", size = 11540635, upload-time = "2025-09-29T23:25:52.486Z" }, + { url = "https://files.pythonhosted.org/packages/21/00/266d6b357ad5e6d3ad55093a7e8efc7dd245f5a842b584db9f30b0f0a287/pandas-2.3.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1611aedd912e1ff81ff41c745822980c49ce4a7907537be8692c8dbc31924593", size = 10759079, upload-time = "2025-09-29T23:26:33.204Z" }, + { url = "https://files.pythonhosted.org/packages/ca/05/d01ef80a7a3a12b2f8bbf16daba1e17c98a2f039cbc8e2f77a2c5a63d382/pandas-2.3.3-cp314-cp314-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6d2cefc361461662ac48810cb14365a365ce864afe85ef1f447ff5a1e99ea81c", size = 11814049, upload-time = "2025-09-29T23:27:15.384Z" }, + { url = "https://files.pythonhosted.org/packages/15/b2/0e62f78c0c5ba7e3d2c5945a82456f4fac76c480940f805e0b97fcbc2f65/pandas-2.3.3-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ee67acbbf05014ea6c763beb097e03cd629961c8a632075eeb34247120abcb4b", size = 12332638, upload-time = "2025-09-29T23:27:51.625Z" }, + { url = "https://files.pythonhosted.org/packages/c5/33/dd70400631b62b9b29c3c93d2feee1d0964dc2bae2e5ad7a6c73a7f25325/pandas-2.3.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c46467899aaa4da076d5abc11084634e2d197e9460643dd455ac3db5856b24d6", size = 12886834, upload-time = "2025-09-29T23:28:21.289Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/18/b5d48f55821228d0d2692b34fd5034bb185e854bdb592e9c640f6290e012/pandas-2.3.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:6253c72c6a1d990a410bc7de641d34053364ef8bcd3126f7e7450125887dffe3", size = 13409925, upload-time = "2025-09-29T23:28:58.261Z" }, + { url = "https://files.pythonhosted.org/packages/a6/3d/124ac75fcd0ecc09b8fdccb0246ef65e35b012030defb0e0eba2cbbbe948/pandas-2.3.3-cp314-cp314-win_amd64.whl", hash = "sha256:1b07204a219b3b7350abaae088f451860223a52cfb8a6c53358e7948735158e5", size = 11109071, upload-time = "2025-09-29T23:32:27.484Z" }, + { url = "https://files.pythonhosted.org/packages/89/9c/0e21c895c38a157e0faa1fb64587a9226d6dd46452cac4532d80c3c4a244/pandas-2.3.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2462b1a365b6109d275250baaae7b760fd25c726aaca0054649286bcfbb3e8ec", size = 12048504, upload-time = "2025-09-29T23:29:31.47Z" }, + { url = "https://files.pythonhosted.org/packages/d7/82/b69a1c95df796858777b68fbe6a81d37443a33319761d7c652ce77797475/pandas-2.3.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0242fe9a49aa8b4d78a4fa03acb397a58833ef6199e9aa40a95f027bb3a1b6e7", size = 11410702, upload-time = "2025-09-29T23:29:54.591Z" }, + { url = "https://files.pythonhosted.org/packages/f9/88/702bde3ba0a94b8c73a0181e05144b10f13f29ebfc2150c3a79062a8195d/pandas-2.3.3-cp314-cp314t-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a21d830e78df0a515db2b3d2f5570610f5e6bd2e27749770e8bb7b524b89b450", size = 11634535, upload-time = "2025-09-29T23:30:21.003Z" }, + { url = "https://files.pythonhosted.org/packages/a4/1e/1bac1a839d12e6a82ec6cb40cda2edde64a2013a66963293696bbf31fbbb/pandas-2.3.3-cp314-cp314t-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2e3ebdb170b5ef78f19bfb71b0dc5dc58775032361fa188e814959b74d726dd5", size = 12121582, upload-time = "2025-09-29T23:30:43.391Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/91/483de934193e12a3b1d6ae7c8645d083ff88dec75f46e827562f1e4b4da6/pandas-2.3.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:d051c0e065b94b7a3cea50eb1ec32e912cd96dba41647eb24104b6c6c14c5788", size = 12699963, upload-time = "2025-09-29T23:31:10.009Z" }, + { url = "https://files.pythonhosted.org/packages/70/44/5191d2e4026f86a2a109053e194d3ba7a31a2d10a9c2348368c63ed4e85a/pandas-2.3.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3869faf4bd07b3b66a9f462417d0ca3a9df29a9f6abd5d0d0dbab15dac7abe87", size = 13202175, upload-time = "2025-09-29T23:31:59.173Z" }, +] + [[package]] name = "pathspec" version = "0.12.1" @@ -1226,6 +1386,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cb/28/3bfe2fa5a7b9c46fe7e13c97bda14c895fb10fa2ebf1d0abb90e0cea7ee1/platformdirs-4.5.1-py3-none-any.whl", hash = "sha256:d03afa3963c806a9bed9d5125c8f4cb2fdaf74a55ab60e5d59b3fde758104d31", size = 18731, upload-time = "2025-12-05T13:52:56.823Z" }, ] +[[package]] +name = "plotly" +version = "6.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "narwhals" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/05/1199e2a03ce6637960bc1e951ca0f928209a48cfceb57355806a88f214cf/plotly-6.5.0.tar.gz", hash = "sha256:d5d38224883fd38c1409bef7d6a8dc32b74348d39313f3c52ca998b8e447f5c8", size = 7013624, upload-time = "2025-11-17T18:39:24.523Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/c3/3031c931098de393393e1f93a38dc9ed6805d86bb801acc3cf2d5bd1e6b7/plotly-6.5.0-py3-none-any.whl", hash = "sha256:5ac851e100367735250206788a2b1325412aa4a4917a4fe3e6f0bc5aa6f3d90a", size = 9893174, upload-time = "2025-11-17T18:39:20.351Z" }, +] + [[package]] name = "pluggy" version = "1.6.0" @@ -1479,6 +1652,15 @@ wheels = [ { url = 
"https://files.pythonhosted.org/packages/86/c1/0ee413ddd639aebf22c85d6db39f136ccc10e6a4b4dd275a92b5c839de8d/python_snappy-0.7.3-py3-none-any.whl", hash = "sha256:074c0636cfcd97e7251330f428064050ac81a52c62ed884fc2ddebbb60ed7f50", size = 9155, upload-time = "2024-08-29T13:16:04.773Z" }, ] +[[package]] +name = "pytz" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, +] + [[package]] name = "pywin32-ctypes" version = "0.2.3" @@ -1718,6 +1900,77 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/31/5e/d3a6fdf61f6373e53bfb45d6819a72dfef741bc8a9ff31c64496688e7c39/ruff_lsp-0.0.62-py3-none-any.whl", hash = "sha256:fb6c04a0cb09bb3ae316121b084ff09497edd01df58b36fa431f14515c63029e", size = 20980, upload-time = "2025-02-10T13:18:31.034Z" }, ] +[[package]] +name = "scipy" +version = "1.16.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0a/ca/d8ace4f98322d01abcd52d381134344bf7b431eba7ed8b42bdea5a3c2ac9/scipy-1.16.3.tar.gz", hash = "sha256:01e87659402762f43bd2fee13370553a17ada367d42e7487800bf2916535aecb", size = 30597883, upload-time = "2025-10-28T17:38:54.068Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9b/5f/6f37d7439de1455ce9c5a556b8d1db0979f03a796c030bafdf08d35b7bf9/scipy-1.16.3-cp311-cp311-macosx_10_14_x86_64.whl", hash = 
"sha256:40be6cf99e68b6c4321e9f8782e7d5ff8265af28ef2cd56e9c9b2638fa08ad97", size = 36630881, upload-time = "2025-10-28T17:31:47.104Z" }, + { url = "https://files.pythonhosted.org/packages/7c/89/d70e9f628749b7e4db2aa4cd89735502ff3f08f7b9b27d2e799485987cd9/scipy-1.16.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:8be1ca9170fcb6223cc7c27f4305d680ded114a1567c0bd2bfcbf947d1b17511", size = 28941012, upload-time = "2025-10-28T17:31:53.411Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a8/0e7a9a6872a923505dbdf6bb93451edcac120363131c19013044a1e7cb0c/scipy-1.16.3-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:bea0a62734d20d67608660f69dcda23e7f90fb4ca20974ab80b6ed40df87a005", size = 20931935, upload-time = "2025-10-28T17:31:57.361Z" }, + { url = "https://files.pythonhosted.org/packages/bd/c7/020fb72bd79ad798e4dbe53938543ecb96b3a9ac3fe274b7189e23e27353/scipy-1.16.3-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:2a207a6ce9c24f1951241f4693ede2d393f59c07abc159b2cb2be980820e01fb", size = 23534466, upload-time = "2025-10-28T17:32:01.875Z" }, + { url = "https://files.pythonhosted.org/packages/be/a0/668c4609ce6dbf2f948e167836ccaf897f95fb63fa231c87da7558a374cd/scipy-1.16.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:532fb5ad6a87e9e9cd9c959b106b73145a03f04c7d57ea3e6f6bb60b86ab0876", size = 33593618, upload-time = "2025-10-28T17:32:06.902Z" }, + { url = "https://files.pythonhosted.org/packages/ca/6e/8942461cf2636cdae083e3eb72622a7fbbfa5cf559c7d13ab250a5dbdc01/scipy-1.16.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0151a0749efeaaab78711c78422d413c583b8cdd2011a3c1d6c794938ee9fdb2", size = 35899798, upload-time = "2025-10-28T17:32:12.665Z" }, + { url = "https://files.pythonhosted.org/packages/79/e8/d0f33590364cdbd67f28ce79368b373889faa4ee959588beddf6daef9abe/scipy-1.16.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b7180967113560cca57418a7bc719e30366b47959dd845a93206fbed693c867e", size = 
36226154, upload-time = "2025-10-28T17:32:17.961Z" }, + { url = "https://files.pythonhosted.org/packages/39/c1/1903de608c0c924a1749c590064e65810f8046e437aba6be365abc4f7557/scipy-1.16.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:deb3841c925eeddb6afc1e4e4a45e418d19ec7b87c5df177695224078e8ec733", size = 38878540, upload-time = "2025-10-28T17:32:23.907Z" }, + { url = "https://files.pythonhosted.org/packages/f1/d0/22ec7036ba0b0a35bccb7f25ab407382ed34af0b111475eb301c16f8a2e5/scipy-1.16.3-cp311-cp311-win_amd64.whl", hash = "sha256:53c3844d527213631e886621df5695d35e4f6a75f620dca412bcd292f6b87d78", size = 38722107, upload-time = "2025-10-28T17:32:29.921Z" }, + { url = "https://files.pythonhosted.org/packages/7b/60/8a00e5a524bb3bf8898db1650d350f50e6cffb9d7a491c561dc9826c7515/scipy-1.16.3-cp311-cp311-win_arm64.whl", hash = "sha256:9452781bd879b14b6f055b26643703551320aa8d79ae064a71df55c00286a184", size = 25506272, upload-time = "2025-10-28T17:32:34.577Z" }, + { url = "https://files.pythonhosted.org/packages/40/41/5bf55c3f386b1643812f3a5674edf74b26184378ef0f3e7c7a09a7e2ca7f/scipy-1.16.3-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:81fc5827606858cf71446a5e98715ba0e11f0dbc83d71c7409d05486592a45d6", size = 36659043, upload-time = "2025-10-28T17:32:40.285Z" }, + { url = "https://files.pythonhosted.org/packages/1e/0f/65582071948cfc45d43e9870bf7ca5f0e0684e165d7c9ef4e50d783073eb/scipy-1.16.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:c97176013d404c7346bf57874eaac5187d969293bf40497140b0a2b2b7482e07", size = 28898986, upload-time = "2025-10-28T17:32:45.325Z" }, + { url = "https://files.pythonhosted.org/packages/96/5e/36bf3f0ac298187d1ceadde9051177d6a4fe4d507e8f59067dc9dd39e650/scipy-1.16.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2b71d93c8a9936046866acebc915e2af2e292b883ed6e2cbe5c34beb094b82d9", size = 20889814, upload-time = "2025-10-28T17:32:49.277Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/35/178d9d0c35394d5d5211bbff7ac4f2986c5488b59506fef9e1de13ea28d3/scipy-1.16.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3d4a07a8e785d80289dfe66b7c27d8634a773020742ec7187b85ccc4b0e7b686", size = 23565795, upload-time = "2025-10-28T17:32:53.337Z" }, + { url = "https://files.pythonhosted.org/packages/fa/46/d1146ff536d034d02f83c8afc3c4bab2eddb634624d6529a8512f3afc9da/scipy-1.16.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0553371015692a898e1aa858fed67a3576c34edefa6b7ebdb4e9dde49ce5c203", size = 33349476, upload-time = "2025-10-28T17:32:58.353Z" }, + { url = "https://files.pythonhosted.org/packages/79/2e/415119c9ab3e62249e18c2b082c07aff907a273741b3f8160414b0e9193c/scipy-1.16.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:72d1717fd3b5e6ec747327ce9bda32d5463f472c9dce9f54499e81fbd50245a1", size = 35676692, upload-time = "2025-10-28T17:33:03.88Z" }, + { url = "https://files.pythonhosted.org/packages/27/82/df26e44da78bf8d2aeaf7566082260cfa15955a5a6e96e6a29935b64132f/scipy-1.16.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1fb2472e72e24d1530debe6ae078db70fb1605350c88a3d14bc401d6306dbffe", size = 36019345, upload-time = "2025-10-28T17:33:09.773Z" }, + { url = "https://files.pythonhosted.org/packages/82/31/006cbb4b648ba379a95c87262c2855cd0d09453e500937f78b30f02fa1cd/scipy-1.16.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c5192722cffe15f9329a3948c4b1db789fbb1f05c97899187dcf009b283aea70", size = 38678975, upload-time = "2025-10-28T17:33:15.809Z" }, + { url = "https://files.pythonhosted.org/packages/c2/7f/acbd28c97e990b421af7d6d6cd416358c9c293fc958b8529e0bd5d2a2a19/scipy-1.16.3-cp312-cp312-win_amd64.whl", hash = "sha256:56edc65510d1331dae01ef9b658d428e33ed48b4f77b1d51caf479a0253f96dc", size = 38555926, upload-time = "2025-10-28T17:33:21.388Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/69/c5c7807fd007dad4f48e0a5f2153038dc96e8725d3345b9ee31b2b7bed46/scipy-1.16.3-cp312-cp312-win_arm64.whl", hash = "sha256:a8a26c78ef223d3e30920ef759e25625a0ecdd0d60e5a8818b7513c3e5384cf2", size = 25463014, upload-time = "2025-10-28T17:33:25.975Z" }, + { url = "https://files.pythonhosted.org/packages/72/f1/57e8327ab1508272029e27eeef34f2302ffc156b69e7e233e906c2a5c379/scipy-1.16.3-cp313-cp313-macosx_10_14_x86_64.whl", hash = "sha256:d2ec56337675e61b312179a1ad124f5f570c00f920cc75e1000025451b88241c", size = 36617856, upload-time = "2025-10-28T17:33:31.375Z" }, + { url = "https://files.pythonhosted.org/packages/44/13/7e63cfba8a7452eb756306aa2fd9b37a29a323b672b964b4fdeded9a3f21/scipy-1.16.3-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:16b8bc35a4cc24db80a0ec836a9286d0e31b2503cb2fd7ff7fb0e0374a97081d", size = 28874306, upload-time = "2025-10-28T17:33:36.516Z" }, + { url = "https://files.pythonhosted.org/packages/15/65/3a9400efd0228a176e6ec3454b1fa998fbbb5a8defa1672c3f65706987db/scipy-1.16.3-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:5803c5fadd29de0cf27fa08ccbfe7a9e5d741bf63e4ab1085437266f12460ff9", size = 20865371, upload-time = "2025-10-28T17:33:42.094Z" }, + { url = "https://files.pythonhosted.org/packages/33/d7/eda09adf009a9fb81827194d4dd02d2e4bc752cef16737cc4ef065234031/scipy-1.16.3-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:b81c27fc41954319a943d43b20e07c40bdcd3ff7cf013f4fb86286faefe546c4", size = 23524877, upload-time = "2025-10-28T17:33:48.483Z" }, + { url = "https://files.pythonhosted.org/packages/7d/6b/3f911e1ebc364cb81320223a3422aab7d26c9c7973109a9cd0f27c64c6c0/scipy-1.16.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0c3b4dd3d9b08dbce0f3440032c52e9e2ab9f96ade2d3943313dfe51a7056959", size = 33342103, upload-time = "2025-10-28T17:33:56.495Z" }, + { url = 
"https://files.pythonhosted.org/packages/21/f6/4bfb5695d8941e5c570a04d9fcd0d36bce7511b7d78e6e75c8f9791f82d0/scipy-1.16.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7dc1360c06535ea6116a2220f760ae572db9f661aba2d88074fe30ec2aa1ff88", size = 35697297, upload-time = "2025-10-28T17:34:04.722Z" }, + { url = "https://files.pythonhosted.org/packages/04/e1/6496dadbc80d8d896ff72511ecfe2316b50313bfc3ebf07a3f580f08bd8c/scipy-1.16.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:663b8d66a8748051c3ee9c96465fb417509315b99c71550fda2591d7dd634234", size = 36021756, upload-time = "2025-10-28T17:34:13.482Z" }, + { url = "https://files.pythonhosted.org/packages/fe/bd/a8c7799e0136b987bda3e1b23d155bcb31aec68a4a472554df5f0937eef7/scipy-1.16.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:eab43fae33a0c39006a88096cd7b4f4ef545ea0447d250d5ac18202d40b6611d", size = 38696566, upload-time = "2025-10-28T17:34:22.384Z" }, + { url = "https://files.pythonhosted.org/packages/cd/01/1204382461fcbfeb05b6161b594f4007e78b6eba9b375382f79153172b4d/scipy-1.16.3-cp313-cp313-win_amd64.whl", hash = "sha256:062246acacbe9f8210de8e751b16fc37458213f124bef161a5a02c7a39284304", size = 38529877, upload-time = "2025-10-28T17:35:51.076Z" }, + { url = "https://files.pythonhosted.org/packages/7f/14/9d9fbcaa1260a94f4bb5b64ba9213ceb5d03cd88841fe9fd1ffd47a45b73/scipy-1.16.3-cp313-cp313-win_arm64.whl", hash = "sha256:50a3dbf286dbc7d84f176f9a1574c705f277cb6565069f88f60db9eafdbe3ee2", size = 25455366, upload-time = "2025-10-28T17:35:59.014Z" }, + { url = "https://files.pythonhosted.org/packages/e2/a3/9ec205bd49f42d45d77f1730dbad9ccf146244c1647605cf834b3a8c4f36/scipy-1.16.3-cp313-cp313t-macosx_10_14_x86_64.whl", hash = "sha256:fb4b29f4cf8cc5a8d628bc8d8e26d12d7278cd1f219f22698a378c3d67db5e4b", size = 37027931, upload-time = "2025-10-28T17:34:31.451Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/06/ca9fd1f3a4589cbd825b1447e5db3a8ebb969c1eaf22c8579bd286f51b6d/scipy-1.16.3-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:8d09d72dc92742988b0e7750bddb8060b0c7079606c0d24a8cc8e9c9c11f9079", size = 29400081, upload-time = "2025-10-28T17:34:39.087Z" }, + { url = "https://files.pythonhosted.org/packages/6a/56/933e68210d92657d93fb0e381683bc0e53a965048d7358ff5fbf9e6a1b17/scipy-1.16.3-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:03192a35e661470197556de24e7cb1330d84b35b94ead65c46ad6f16f6b28f2a", size = 21391244, upload-time = "2025-10-28T17:34:45.234Z" }, + { url = "https://files.pythonhosted.org/packages/a8/7e/779845db03dc1418e215726329674b40576879b91814568757ff0014ad65/scipy-1.16.3-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:57d01cb6f85e34f0946b33caa66e892aae072b64b034183f3d87c4025802a119", size = 23929753, upload-time = "2025-10-28T17:34:51.793Z" }, + { url = "https://files.pythonhosted.org/packages/4c/4b/f756cf8161d5365dcdef9e5f460ab226c068211030a175d2fc7f3f41ca64/scipy-1.16.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:96491a6a54e995f00a28a3c3badfff58fd093bf26cd5fb34a2188c8c756a3a2c", size = 33496912, upload-time = "2025-10-28T17:34:59.8Z" }, + { url = "https://files.pythonhosted.org/packages/09/b5/222b1e49a58668f23839ca1542a6322bb095ab8d6590d4f71723869a6c2c/scipy-1.16.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:cd13e354df9938598af2be05822c323e97132d5e6306b83a3b4ee6724c6e522e", size = 35802371, upload-time = "2025-10-28T17:35:08.173Z" }, + { url = "https://files.pythonhosted.org/packages/c1/8d/5964ef68bb31829bde27611f8c9deeac13764589fe74a75390242b64ca44/scipy-1.16.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:63d3cdacb8a824a295191a723ee5e4ea7768ca5ca5f2838532d9f2e2b3ce2135", size = 36190477, upload-time = "2025-10-28T17:35:16.7Z" }, + { url = 
"https://files.pythonhosted.org/packages/ab/f2/b31d75cb9b5fa4dd39a0a931ee9b33e7f6f36f23be5ef560bf72e0f92f32/scipy-1.16.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e7efa2681ea410b10dde31a52b18b0154d66f2485328830e45fdf183af5aefc6", size = 38796678, upload-time = "2025-10-28T17:35:26.354Z" }, + { url = "https://files.pythonhosted.org/packages/b4/1e/b3723d8ff64ab548c38d87055483714fefe6ee20e0189b62352b5e015bb1/scipy-1.16.3-cp313-cp313t-win_amd64.whl", hash = "sha256:2d1ae2cf0c350e7705168ff2429962a89ad90c2d49d1dd300686d8b2a5af22fc", size = 38640178, upload-time = "2025-10-28T17:35:35.304Z" }, + { url = "https://files.pythonhosted.org/packages/8e/f3/d854ff38789aca9b0cc23008d607ced9de4f7ab14fa1ca4329f86b3758ca/scipy-1.16.3-cp313-cp313t-win_arm64.whl", hash = "sha256:0c623a54f7b79dd88ef56da19bc2873afec9673a48f3b85b18e4d402bdd29a5a", size = 25803246, upload-time = "2025-10-28T17:35:42.155Z" }, + { url = "https://files.pythonhosted.org/packages/99/f6/99b10fd70f2d864c1e29a28bbcaa0c6340f9d8518396542d9ea3b4aaae15/scipy-1.16.3-cp314-cp314-macosx_10_14_x86_64.whl", hash = "sha256:875555ce62743e1d54f06cdf22c1e0bc47b91130ac40fe5d783b6dfa114beeb6", size = 36606469, upload-time = "2025-10-28T17:36:08.741Z" }, + { url = "https://files.pythonhosted.org/packages/4d/74/043b54f2319f48ea940dd025779fa28ee360e6b95acb7cd188fad4391c6b/scipy-1.16.3-cp314-cp314-macosx_12_0_arm64.whl", hash = "sha256:bb61878c18a470021fb515a843dc7a76961a8daceaaaa8bad1332f1bf4b54657", size = 28872043, upload-time = "2025-10-28T17:36:16.599Z" }, + { url = "https://files.pythonhosted.org/packages/4d/e1/24b7e50cc1c4ee6ffbcb1f27fe9f4c8b40e7911675f6d2d20955f41c6348/scipy-1.16.3-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:f2622206f5559784fa5c4b53a950c3c7c1cf3e84ca1b9c4b6c03f062f289ca26", size = 20862952, upload-time = "2025-10-28T17:36:22.966Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/3a/3e8c01a4d742b730df368e063787c6808597ccb38636ed821d10b39ca51b/scipy-1.16.3-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:7f68154688c515cdb541a31ef8eb66d8cd1050605be9dcd74199cbd22ac739bc", size = 23508512, upload-time = "2025-10-28T17:36:29.731Z" }, + { url = "https://files.pythonhosted.org/packages/1f/60/c45a12b98ad591536bfe5330cb3cfe1850d7570259303563b1721564d458/scipy-1.16.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8b3c820ddb80029fe9f43d61b81d8b488d3ef8ca010d15122b152db77dc94c22", size = 33413639, upload-time = "2025-10-28T17:36:37.982Z" }, + { url = "https://files.pythonhosted.org/packages/71/bc/35957d88645476307e4839712642896689df442f3e53b0fa016ecf8a3357/scipy-1.16.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d3837938ae715fc0fe3c39c0202de3a8853aff22ca66781ddc2ade7554b7e2cc", size = 35704729, upload-time = "2025-10-28T17:36:46.547Z" }, + { url = "https://files.pythonhosted.org/packages/3b/15/89105e659041b1ca11c386e9995aefacd513a78493656e57789f9d9eab61/scipy-1.16.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:aadd23f98f9cb069b3bd64ddc900c4d277778242e961751f77a8cb5c4b946fb0", size = 36086251, upload-time = "2025-10-28T17:36:55.161Z" }, + { url = "https://files.pythonhosted.org/packages/1a/87/c0ea673ac9c6cc50b3da2196d860273bc7389aa69b64efa8493bdd25b093/scipy-1.16.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b7c5f1bda1354d6a19bc6af73a649f8285ca63ac6b52e64e658a5a11d4d69800", size = 38716681, upload-time = "2025-10-28T17:37:04.1Z" }, + { url = "https://files.pythonhosted.org/packages/91/06/837893227b043fb9b0d13e4bd7586982d8136cb249ffb3492930dab905b8/scipy-1.16.3-cp314-cp314-win_amd64.whl", hash = "sha256:e5d42a9472e7579e473879a1990327830493a7047506d58d73fc429b84c1d49d", size = 39358423, upload-time = "2025-10-28T17:38:20.005Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/03/28bce0355e4d34a7c034727505a02d19548549e190bedd13a721e35380b7/scipy-1.16.3-cp314-cp314-win_arm64.whl", hash = "sha256:6020470b9d00245926f2d5bb93b119ca0340f0d564eb6fbaad843eaebf9d690f", size = 26135027, upload-time = "2025-10-28T17:38:24.966Z" }, + { url = "https://files.pythonhosted.org/packages/b2/6f/69f1e2b682efe9de8fe9f91040f0cd32f13cfccba690512ba4c582b0bc29/scipy-1.16.3-cp314-cp314t-macosx_10_14_x86_64.whl", hash = "sha256:e1d27cbcb4602680a49d787d90664fa4974063ac9d4134813332a8c53dbe667c", size = 37028379, upload-time = "2025-10-28T17:37:14.061Z" }, + { url = "https://files.pythonhosted.org/packages/7c/2d/e826f31624a5ebbab1cd93d30fd74349914753076ed0593e1d56a98c4fb4/scipy-1.16.3-cp314-cp314t-macosx_12_0_arm64.whl", hash = "sha256:9b9c9c07b6d56a35777a1b4cc8966118fb16cfd8daf6743867d17d36cfad2d40", size = 29400052, upload-time = "2025-10-28T17:37:21.709Z" }, + { url = "https://files.pythonhosted.org/packages/69/27/d24feb80155f41fd1f156bf144e7e049b4e2b9dd06261a242905e3bc7a03/scipy-1.16.3-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:3a4c460301fb2cffb7f88528f30b3127742cff583603aa7dc964a52c463b385d", size = 21391183, upload-time = "2025-10-28T17:37:29.559Z" }, + { url = "https://files.pythonhosted.org/packages/f8/d3/1b229e433074c5738a24277eca520a2319aac7465eea7310ea6ae0e98ae2/scipy-1.16.3-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:f667a4542cc8917af1db06366d3f78a5c8e83badd56409f94d1eac8d8d9133fa", size = 23930174, upload-time = "2025-10-28T17:37:36.306Z" }, + { url = "https://files.pythonhosted.org/packages/16/9d/d9e148b0ec680c0f042581a2be79a28a7ab66c0c4946697f9e7553ead337/scipy-1.16.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f379b54b77a597aa7ee5e697df0d66903e41b9c85a6dd7946159e356319158e8", size = 33497852, upload-time = "2025-10-28T17:37:42.228Z" }, + { url = 
"https://files.pythonhosted.org/packages/2f/22/4e5f7561e4f98b7bea63cf3fd7934bff1e3182e9f1626b089a679914d5c8/scipy-1.16.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4aff59800a3b7f786b70bfd6ab551001cb553244988d7d6b8299cb1ea653b353", size = 35798595, upload-time = "2025-10-28T17:37:48.102Z" }, + { url = "https://files.pythonhosted.org/packages/83/42/6644d714c179429fc7196857866f219fef25238319b650bb32dde7bf7a48/scipy-1.16.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:da7763f55885045036fabcebd80144b757d3db06ab0861415d1c3b7c69042146", size = 36186269, upload-time = "2025-10-28T17:37:53.72Z" }, + { url = "https://files.pythonhosted.org/packages/ac/70/64b4d7ca92f9cf2e6fc6aaa2eecf80bb9b6b985043a9583f32f8177ea122/scipy-1.16.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ffa6eea95283b2b8079b821dc11f50a17d0571c92b43e2b5b12764dc5f9b285d", size = 38802779, upload-time = "2025-10-28T17:37:59.393Z" }, + { url = "https://files.pythonhosted.org/packages/61/82/8d0e39f62764cce5ffd5284131e109f07cf8955aef9ab8ed4e3aa5e30539/scipy-1.16.3-cp314-cp314t-win_amd64.whl", hash = "sha256:d9f48cafc7ce94cf9b15c6bffdc443a81a27bf7075cf2dcd5c8b40f85d10c4e7", size = 39471128, upload-time = "2025-10-28T17:38:05.259Z" }, + { url = "https://files.pythonhosted.org/packages/64/47/a494741db7280eae6dc033510c319e34d42dd41b7ac0c7ead39354d1a2b5/scipy-1.16.3-cp314-cp314t-win_arm64.whl", hash = "sha256:21d9d6b197227a12dcbf9633320a4e34c6b0e51c57268df255a0942983bac562", size = 26464127, upload-time = "2025-10-28T17:38:11.34Z" }, +] + [[package]] name = "secretstorage" version = "3.5.0" @@ -2047,6 +2300,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" }, ] +[[package]] +name = "tzdata" +version = 
"2025.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5e/a7/c202b344c5ca7daf398f3b8a477eeb205cf3b6f32e7ec3a6bac0629ca975/tzdata-2025.3.tar.gz", hash = "sha256:de39c2ca5dc7b0344f2eba86f49d614019d29f060fc4ebc8a417896a620b56a7", size = 196772, upload-time = "2025-12-13T17:45:35.667Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/b0/003792df09decd6849a5e39c28b513c06e84436a54440380862b5aeff25d/tzdata-2025.3-py2.py3-none-any.whl", hash = "sha256:06a47e5700f3081aab02b2e513160914ff0694bce9947d6b76ebd6bf57cfc5d1", size = 348521, upload-time = "2025-12-13T17:45:33.889Z" }, +] + [[package]] name = "urllib3" version = "2.6.2"