diff --git a/examples/compositional_analysis/README.md b/examples/compositional_analysis/README.md new file mode 100644 index 0000000..4227376 --- /dev/null +++ b/examples/compositional_analysis/README.md @@ -0,0 +1,14 @@ +# Compositional Analysis for Markovian Specifications + +This directory provides an example use case of **compositional analysis** for *Markovian* (i.e., memoryless) specifications. + +The core idea is to perform **Statistical Model Checking (SMC)** or **falsification** on *primitive scenarios* — scenarios that serve as building blocks for defining more complex *composite scenarios*. The analysis traces generated from these primitives are stored in a `ScenarioBase` object. + +These traces can then be supplied to an instance of `CompositionalAnalysisEngine`, which supports querying over composite scenario structures to perform compositional analysis based on the primitive scenario traces. + +This example uses the [MetaDrive](https://metadriverse.github.io/metadrive/) simulator. + +* Train a reinforcement learning policy using `train.py`. +* Test the policy and save generated traces using `test.py`. +* See `analyze.py` for an example of how to perform compositional analysis on generated traces. + diff --git a/examples/compositional_analysis/analyze.py b/examples/compositional_analysis/analyze.py new file mode 100644 index 0000000..4d0e17b --- /dev/null +++ b/examples/compositional_analysis/analyze.py @@ -0,0 +1,47 @@ +import pandas as pd +from verifai.compositional_analysis import CompositionalAnalysisEngine, ScenarioBase + + +if __name__ == "__main__": + logs = { + "S": "storage/traces/S/traces.csv", + "X": "storage/traces/X/traces.csv", + # "O": "storage/traces/O/traces.csv", + # "C": "storage/traces/C/traces.csv", + "SX": "storage/traces/SX/traces.csv", + # "SO": "storage/traces/SO/traces.csv", + # "SC": "storage/traces/SC/traces.csv", + "SXS": "storage/traces/SXS/traces.csv", + # "SOS": "storage/traces/SOS/traces.csv", + # "SCS": "storage/traces/SCS/traces.csv", + } + scenario_base = ScenarioBase(logs, delta=0.01) + + print("SMC") + for s in logs: + print(f"{s}: rho = {scenario_base.get_success_prob(s):.4f} ± {scenario_base.get_success_prob_uncertainty(s):.4f}") + + engine = CompositionalAnalysisEngine(scenario_base) + + pd.set_option('display.max_rows', None) # Display all rows + pd.set_option('display.max_columns', None) # Display all columns + pd.set_option('display.width', 1000) # Ensure enough width to prevent wrapping + + for s in logs: + print(f"Scenario: {s}") + print(f"Compositional SMC") + rho, uncertainty = engine.check( + s, + features=["x", "y", "heading", "speed"], + center_feat_idx=[0, 1], + ) + print(f"Estimated {s}: rho = {rho:.4f} ± {uncertainty:.4f}") + print(f"Compositional Falsification") + cex = engine.falsify( + s, + features=["x", "y", "heading", "speed"], + center_feat_idx=[0, 1], + align_feat_idx=[0, 1], + ) + print(f"Counterexample = {cex}") + diff --git a/examples/compositional_analysis/exps.sh b/examples/compositional_analysis/exps.sh new file mode 100755 index 0000000..53fc722 --- /dev/null +++ b/examples/compositional_analysis/exps.sh @@ -0,0 +1,23 @@ +# Fixed Error (epsilon = 0.1) +## Monolithic +{ time python run_exp.py --expert --save_dir storage/fixed_error_monolithic_SX --confidence_level 0.95 --error_bound 0.04295 --scenario "SX"; } &> storage/results/fixed_error_monolithic_SX.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_error_monolithic_COS --confidence_level 0.95 --error_bound 0.04295 --scenario "COS"; } &> 
storage/results/fixed_error_monolithic_COS.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_error_monolithic_XSOC --confidence_level 0.95 --error_bound 0.04295 --scenario "XSOC"; } &> storage/results/fixed_error_monolithic_XSOC.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_error_monolithic_SOCXS --confidence_level 0.95 --error_bound 0.04295 --scenario "SOCXS"; } &> storage/results/fixed_error_monolithic_SOCXS.txt & +## Compositional +{ time python run_exp.py --expert --save_dir storage/fixed_error_compositional_SX --confidence_level 0.95 --error_bound 0.04295 --scenario "SX" --compositional; } &> storage/results/fixed_error_compositional_SX.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_error_compositional_COS --confidence_level 0.95 --error_bound 0.04295 --scenario "COS" --compositional; } &> storage/results/fixed_error_compositional_COS.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_error_compositional_XSOC --confidence_level 0.95 --error_bound 0.04295 --scenario "XSOC" --compositional; } &> storage/results/fixed_error_compositional_XSOC.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_error_compositional_SOCXS --confidence_level 0.95 --error_bound 0.04295 --scenario "SOCXS" --compositional; } &> storage/results/fixed_error_compositional_SOCXS.txt & + +# Fixed Time Budget (2 mins) +## Monolithic +{ time python run_exp.py --expert --save_dir storage/fixed_time_monolithic_SX --confidence_level 0.95 --time_budget 120 --scenario "SX"; } &> storage/results/fixed_time_monolithic_SX.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_time_monolithic_COS --confidence_level 0.95 --time_budget 120 --scenario "COS"; } &> storage/results/fixed_time_monolithic_COS.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_time_monolithic_XSOC --confidence_level 0.95 --time_budget 120 --scenario "XSOC"; } &> storage/results/fixed_time_monolithic_XSOC.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_time_monolithic_SOCXS --confidence_level 0.95 --time_budget 120 --scenario "SOCXS"; } &> storage/results/fixed_time_monolithic_SOCXS.txt & +## Compositional +{ time python run_exp.py --expert --save_dir storage/fixed_time_compositional_SX --confidence_level 0.95 --time_budget 120 --scenario "SX" --compositional; } &> storage/results/fixed_time_compositional_SX.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_time_compositional_COS --confidence_level 0.95 --time_budget 120 --scenario "COS" --compositional; } &> storage/results/fixed_time_compositional_COS.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_time_compositional_XSOC --confidence_level 0.95 --time_budget 120 --scenario "XSOC" --compositional; } &> storage/results/fixed_time_compositional_XSOC.txt & +{ time python run_exp.py --expert --save_dir storage/fixed_time_compositional_SOCXS --confidence_level 0.95 --time_budget 120 --scenario "SOCXS" --compositional; } &> storage/results/fixed_time_compositional_SOCXS.txt & diff --git a/examples/compositional_analysis/run_exp.py b/examples/compositional_analysis/run_exp.py new file mode 100644 index 0000000..528057e --- /dev/null +++ b/examples/compositional_analysis/run_exp.py @@ -0,0 +1,357 @@ +import os +import time +import shutil +import math +import multiprocessing as mp +from verifai.compositional_analysis import ScenarioBase, CompositionalAnalysisEngine +from utils import generate_traces + + +def compute_hoeffding_samples(confidence_level, 
error_bound): + """ + Compute the number of samples needed using Hoeffding's inequality. + + Hoeffding's inequality: P(|rho_hat - rho| >= epsilon) <= 2 * exp(-2 * n * epsilon^2) + + Setting 2 * exp(-2 * n * epsilon^2) = 1 - confidence_level (delta) + Solving for n: n = ln(2/delta) / (2 * epsilon^2) + + Args: + confidence_level: Desired confidence level (e.g., 0.95 for 95%) + error_bound: Maximum error epsilon (e.g., 0.01 for 1%) + + Returns: + Number of samples needed + """ + delta = 1 - confidence_level + n = math.log(2 / delta) / (2 * error_bound ** 2) + return int(math.ceil(n)) + + +def _worker_generate_traces(save_dir, scenario, n, expert, model_path): + print(f"[PID={os.getpid()}] Starting scenario {scenario}") + # If n is None or inf, use a very large number that generate_traces can handle + if n is None or n == float('inf'): + traces_to_generate = 10**9 # 1 billion (effectively infinite for practical purposes) + else: + traces_to_generate = n + generate_traces( + n=traces_to_generate, + save_dir=save_dir, + scenario=scenario, + model_path=model_path, + expert=expert + ) + print(f"[PID={os.getpid()}] Finished scenario {scenario}") + + +def generate_traces_parallel(n, save_dir, scenarios, time_budget, expert, model_path): + """ + Generate traces in parallel using multiprocessing with HARD STOP. + Terminates all processes when time budget is reached, discarding only the current partial trace. + Completed traces are kept. + """ + # Clear old trace directories + for s in scenarios: + scenario_dir = os.path.join(save_dir, s) + if os.path.exists(scenario_dir): + shutil.rmtree(scenario_dir) + + print("=== Generating Traces (Parallel - HARD STOP) ===") + + # Launch all processes + processes = [] + start_time = time.time() + + for s in scenarios: + print(f"Launching scenario {s}") + p = mp.Process( + target=_worker_generate_traces, + args=(save_dir, s, n, expert, model_path) + ) + p.start() + processes.append((s, p)) + + # Monitor time budget and terminate if exceeded + trace_counts_before_termination = {} + while True: + elapsed = time.time() - start_time + + # Check if time budget exceeded (skip check if time_budget is inf) + if time_budget != float('inf') and elapsed >= time_budget: + print(f"\n[HARD STOP] Time budget ({time_budget}s) reached at {elapsed:.2f}s") + + # Record trace counts RIGHT BEFORE termination + for s in scenarios: + csv_path = os.path.join(save_dir, s, "traces.csv") + if os.path.exists(csv_path): + with open(csv_path, 'r') as f: + lines = f.readlines() + if len(lines) <= 1: # Only header or empty + trace_counts_before_termination[s] = 0 + else: + # Count unique trace_ids (episodes) + trace_ids = set() + for line in lines[1:]: # Skip header + parts = line.split(',') + if parts: + trace_ids.add(parts[0]) # First column is trace_id + trace_counts_before_termination[s] = len(trace_ids) + else: + trace_counts_before_termination[s] = 0 + + print("Terminating all running processes...") + + for scenario_name, proc in processes: + if proc.is_alive(): + print(f"Terminating scenario {scenario_name} (PID={proc.pid})") + proc.terminate() + proc.join(timeout=5) # Wait up to 5 seconds for graceful termination + if proc.is_alive(): + print(f"Force killing scenario {scenario_name}") + proc.kill() + proc.join() + break + + # Check if all processes finished naturally + all_done = all(not proc.is_alive() for _, proc in processes) + if all_done: + print(f"All processes finished naturally (elapsed: {elapsed:.2f}s)") + break + + time.sleep(0.1) # Check every 100ms + + # Build logs dict for 
scenarios with valid traces + logs = {} + for s, proc in processes: + csv_path = os.path.join(save_dir, s, "traces.csv") + + if os.path.exists(csv_path): + # If process was terminated, restore to pre-termination state + # (remove the last partial trace that was being written) + if proc.exitcode != 0 and s in trace_counts_before_termination: + with open(csv_path, 'r') as f: + lines = f.readlines() + + if len(lines) <= 1: # Only header or empty + current_count = 0 + else: + # Count unique trace_ids in final file + trace_ids = set() + for line in lines[1:]: # Skip header + parts = line.split(',') + if parts: + trace_ids.add(parts[0]) + current_count = len(trace_ids) + + expected_count = trace_counts_before_termination[s] + + if current_count > expected_count: + # There's a partial trace - keep only the completed traces + print(f"[INFO] Scenario {s}: Removing partial episode (had {current_count} episodes, keeping {expected_count})") + + # Keep only rows with trace_id < expected_count + with open(csv_path, 'w') as f: + f.write(lines[0]) # Write header + for line in lines[1:]: + parts = line.split(',') + if parts: + trace_id = int(parts[0]) + if trace_id < expected_count: + f.write(line) + + # Only add to logs if there are any complete traces + if expected_count > 0: + logs[s] = csv_path + print(f"[INFO] Scenario {s} has {expected_count} completed episodes.") + else: + print(f"[INFO] Scenario {s} had no completed episodes.") + else: + # Process completed successfully + # Count episodes in completed file + with open(csv_path, 'r') as f: + lines = f.readlines() + if len(lines) > 1: + trace_ids = set() + for line in lines[1:]: + parts = line.split(',') + if parts: + trace_ids.add(parts[0]) + episode_count = len(trace_ids) + else: + episode_count = 0 + + logs[s] = csv_path + print(f"[INFO] Scenario {s} completed successfully with {episode_count} episodes.") + else: + print(f"[INFO] Scenario {s} produced no traces.") + + if not logs: + print("No traces generated.") + + return logs + + +def run_monolithic_smc(scenario_base): + """ + Run monolithic SMC analysis on generated traces. + """ + if not scenario_base.logbase: + print("No traces to analyze.") + + results = {} + + print("\n=== Monolithic SMC Results ===") + for s in scenario_base.logbase: + rho = scenario_base.get_success_prob(s) + unc = scenario_base.get_success_prob_uncertainty(s) + print(f"{s}: rho = {rho:.4f} ± {unc:.4f}") + results[s] = {"rho": rho, "uncertainty": unc} + + return results + + +def run_SMC_compositional(scenarios, time_budget, scenario_base): + print("\n=== Running Compositional SMC ===") + start_time = time.time() + results = {} + + engine = CompositionalAnalysisEngine(scenario_base) + + for s in scenarios: + elapsed = time.time() - start_time + remaining_time = time_budget - elapsed + if time_budget != float('inf') and remaining_time <= 0: + print(f"Time budget exhausted before scenario {s}") + break + + rho, uncertainty = engine.check( + s, + features=["x", "y", "heading", "speed"], + center_feat_idx=[0, 1], + ) + + print(f"Estimated {s}: rho = {rho:.4f} ± {uncertainty:.4f}") + + results[s] = {"rho": rho, "uncertainty": uncertainty} + + return results + + +def parse_scenario(input_scenario): + scenarios_set = set() + for s in input_scenario: + scenarios_set.add(s) + return scenarios_set + + +def testScenario(input_scenario, isCompositional, time_budget, n, save_dir, expert, model_path, confidence_level=None, error_bound=None, reuse_traces=False): + """ + Test scenario with hard time budget enforcement. 
+ Terminates all processes when time budget is reached. + + Args: + confidence_level: Confidence level for ground truth (e.g., 0.95) + error_bound: Error bound for ground truth (e.g., 0.01) + reuse_traces: If True, use existing traces from save_dir without generating new ones + """ + + # monolithic trace generation + if not isCompositional: + print("Running MONOLITHIC SMC") + scenarios = [input_scenario] + + if not reuse_traces: + logs = generate_traces_parallel(n=n, save_dir=save_dir, scenarios=scenarios, time_budget=time_budget, expert=expert, model_path=model_path) + else: + # Build logs from existing traces + logs = {} + for s in scenarios: + csv_path = os.path.join(save_dir, s, "traces.csv") + if os.path.exists(csv_path): + logs[s] = csv_path + print(f"[INFO] Using existing traces for scenario {s}") + else: + print(f"[ERROR] No existing traces found for scenario {s} at {csv_path}") + + scenario_base = ScenarioBase(logs, delta=1-confidence_level) + run_monolithic_smc(scenario_base) + + # compositional trace generation + else: + print("Running COMPOSITIONAL SMC on primitive cases (what compositional will use)") + scenarios_set = parse_scenario(input_scenario) + scenarios = list(scenarios_set) + + if not reuse_traces: + logs = generate_traces_parallel(n=n, save_dir=save_dir, scenarios=scenarios, time_budget=time_budget, expert=expert, model_path=model_path) + else: + # Build logs from existing traces + logs = {} + for s in scenarios: + csv_path = os.path.join(save_dir, s, "traces.csv") + if os.path.exists(csv_path): + logs[s] = csv_path + print(f"[INFO] Using existing traces for scenario {s}") + else: + print(f"[ERROR] No existing traces found for scenario {s} at {csv_path}") + + scenario_base = ScenarioBase(logs, delta=1-confidence_level) + + # compositional rho + run_SMC_compositional(scenarios=[input_scenario], time_budget=time_budget, scenario_base=scenario_base) + + # Print summary for easy copy-paste to README + print("\n" + "="*60) + print("SUMMARY") + print("="*60) + print(f"Command: python compare_analysis.py --scenario \"{input_scenario}\" " + f"{'--compositional ' if isCompositional else ''}" + f"{'--expert ' if expert else ''}" + f"{'--reuse_traces ' if reuse_traces else ''}" + f"--time_budget {time_budget if time_budget != float('inf') else 'N/A'} " + f"--save_dir \"{save_dir}\"") + print("="*60) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Run SMC tests with compositional or monolithic approaches") + parser.add_argument("--scenario", type=str, default="SXC", help="Input scenario string (default: SXC)") + parser.add_argument("--compositional", action="store_true", help="Use compositional approach (default: False)") + parser.add_argument("--time_budget", type=int, default=None, help="Time budget in seconds (default: None)") + parser.add_argument("--expert", action="store_true", help="Use expert mode (default: False)") + parser.add_argument("--save_dir", type=str, default="storage/traces", help="Directory to save traces (default: storage/traces)") + parser.add_argument("--model_path", type=str, default="storage/models/model_map_2.zip", help="Path to model file (default: storage/models/model_map_2.zip)") + parser.add_argument("--confidence_level", type=float, default=None, help="Confidence level for ground truth (default: None)") + parser.add_argument("--error_bound", type=float, default=None, help="Error bound (epsilon) for ground truth (default: None)") + parser.add_argument("--reuse_traces", action="store_true", 
help="Use existing traces from save_dir without generating new ones (default: False)") + + args = parser.parse_args() + + mp.set_start_method("spawn") + + assert args.confidence_level is not None + assert (args.error_bound is not None) or (args.time_budget is not None) + + n = None + time_budget = float('inf') + if args.error_bound is not None: + n = compute_hoeffding_samples(args.confidence_level, args.error_bound) + elif args.time_budget is not None: + time_budget = args.time_budget + + testScenario( + input_scenario=args.scenario, + isCompositional=args.compositional, + time_budget=time_budget, + n=n, + save_dir=args.save_dir, + expert=args.expert, + model_path=args.model_path, + confidence_level=args.confidence_level, + error_bound=args.error_bound, + reuse_traces=args.reuse_traces + ) + diff --git a/examples/compositional_analysis/storage/results/fixed_error_compositional_COS.txt b/examples/compositional_analysis/storage/results/fixed_error_compositional_COS.txt new file mode 100644 index 0000000..d3d84b2 --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_error_compositional_COS.txt @@ -0,0 +1,55 @@ +Running COMPOSITIONAL SMC on primitive cases (what compositional will use) +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario C +Launching scenario O +Launching scenario S +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[PID=1889097] Starting scenario S +[PID=1889097] Finished scenario S +[PID=1889096] Starting scenario O +[PID=1889096] Finished scenario O +[PID=1889095] Starting scenario C +[PID=1889095] Finished scenario C +All processes finished naturally (elapsed: 1013.29s) +[INFO] Scenario C completed successfully with 1000 episodes. +[INFO] Scenario O completed successfully with 1000 episodes. +[INFO] Scenario S completed successfully with 1000 episodes. 
+ +=== Running Compositional SMC === +Estimated COS: rho = 0.7152 ± 0.0685 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "COS" --compositional --expert --time_budget N/A --save_dir "storage/fixed_error_compositional_COS" +============================================================ + +real 17m0.943s +user 42m57.621s +sys 0m28.489s diff --git a/examples/compositional_analysis/storage/results/fixed_error_compositional_SOCXS.txt b/examples/compositional_analysis/storage/results/fixed_error_compositional_SOCXS.txt new file mode 100644 index 0000000..8812c25 --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_error_compositional_SOCXS.txt @@ -0,0 +1,68 @@ +Running COMPOSITIONAL SMC on primitive cases (what compositional will use) +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario X +Launching scenario S +Launching scenario O +Launching scenario C +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[PID=1889100] Starting scenario S +[PID=1889100] Finished scenario S +[PID=1889099] Starting scenario X +[PID=1889099] Finished scenario X +[PID=1889101] Starting scenario O +[PID=1889101] Finished scenario O +[PID=1889102] Starting scenario C +[PID=1889102] Finished scenario C +All processes finished naturally (elapsed: 1017.08s) +[INFO] Scenario X completed successfully with 1000 episodes. +[INFO] Scenario S completed successfully with 1000 episodes. +[INFO] Scenario O completed successfully with 1000 episodes. +[INFO] Scenario C completed successfully with 1000 episodes. 
+ +=== Running Compositional SMC === +Estimated SOCXS: rho = 0.6733 ± 0.0873 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "SOCXS" --compositional --expert --time_budget N/A --save_dir "storage/fixed_error_compositional_SOCXS" +============================================================ + +real 17m5.056s +user 53m39.961s +sys 0m30.265s diff --git a/examples/compositional_analysis/storage/results/fixed_error_compositional_SX.txt b/examples/compositional_analysis/storage/results/fixed_error_compositional_SX.txt new file mode 100644 index 0000000..8b9f0d5 --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_error_compositional_SX.txt @@ -0,0 +1,42 @@ +Running COMPOSITIONAL SMC on primitive cases (what compositional will use) +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario X +Launching scenario S +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[PID=1889090] Starting scenario S +[PID=1889090] Finished scenario S +[PID=1889089] Starting scenario X +[PID=1889089] Finished scenario X +All processes finished naturally (elapsed: 719.05s) +[INFO] Scenario X completed successfully with 1000 episodes. +[INFO] Scenario S completed successfully with 1000 episodes. 
+ +=== Running Compositional SMC === +Estimated SX: rho = 0.9497 ± 0.0640 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "SX" --compositional --expert --time_budget N/A --save_dir "storage/fixed_error_compositional_SX" +============================================================ + +real 12m6.153s +user 21m41.154s +sys 0m18.462s diff --git a/examples/compositional_analysis/storage/results/fixed_error_compositional_XSOC.txt b/examples/compositional_analysis/storage/results/fixed_error_compositional_XSOC.txt new file mode 100644 index 0000000..0be13bd --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_error_compositional_XSOC.txt @@ -0,0 +1,68 @@ +Running COMPOSITIONAL SMC on primitive cases (what compositional will use) +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario X +Launching scenario C +Launching scenario S +Launching scenario O +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[PID=1889109] Starting scenario S +[PID=1889109] Finished scenario S +[PID=1889107] Starting scenario X +[PID=1889107] Finished scenario X +[PID=1889110] Starting scenario O +[PID=1889110] Finished scenario O +[PID=1889108] Starting scenario C +[PID=1889108] Finished scenario C +All processes finished naturally (elapsed: 1016.35s) +[INFO] Scenario X completed successfully with 1000 episodes. +[INFO] Scenario C completed successfully with 1000 episodes. +[INFO] Scenario S completed successfully with 1000 episodes. +[INFO] Scenario O completed successfully with 1000 episodes. 
+ +=== Running Compositional SMC === +Estimated XSOC: rho = 0.6705 ± 0.0789 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "XSOC" --compositional --expert --time_budget N/A --save_dir "storage/fixed_error_compositional_XSOC" +============================================================ + +real 17m4.733s +user 54m51.710s +sys 0m36.699s diff --git a/examples/compositional_analysis/storage/results/fixed_error_monolithic_COS.txt b/examples/compositional_analysis/storage/results/fixed_error_monolithic_COS.txt new file mode 100644 index 0000000..f3b3ea9 --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_error_monolithic_COS.txt @@ -0,0 +1,29 @@ +Running MONOLITHIC SMC +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario COS +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[PID=1889105] Starting scenario COS +[PID=1889105] Finished scenario COS +All processes finished naturally (elapsed: 2133.73s) +[INFO] Scenario COS completed successfully with 1000 episodes. + +=== Monolithic SMC Results === +COS: rho = 0.7560 ± 0.0429 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "COS" --expert --time_budget N/A --save_dir "storage/fixed_error_monolithic_COS" +============================================================ + +real 35m40.316s +user 34m57.279s +sys 0m17.355s diff --git a/examples/compositional_analysis/storage/results/fixed_error_monolithic_SOCXS.txt b/examples/compositional_analysis/storage/results/fixed_error_monolithic_SOCXS.txt new file mode 100644 index 0000000..fb8e7a3 --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_error_monolithic_SOCXS.txt @@ -0,0 +1,29 @@ +Running MONOLITHIC SMC +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario SOCXS +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[PID=1889087] Starting scenario SOCXS +[PID=1889087] Finished scenario SOCXS +All processes finished naturally (elapsed: 2883.29s) +[INFO] Scenario SOCXS completed successfully with 1000 episodes. 
+ +=== Monolithic SMC Results === +SOCXS: rho = 0.6780 ± 0.0429 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "SOCXS" --expert --time_budget N/A --save_dir "storage/fixed_error_monolithic_SOCXS" +============================================================ + +real 48m9.998s +user 47m19.091s +sys 0m21.593s diff --git a/examples/compositional_analysis/storage/results/fixed_error_monolithic_SX.txt b/examples/compositional_analysis/storage/results/fixed_error_monolithic_SX.txt new file mode 100644 index 0000000..d56e9a0 --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_error_monolithic_SX.txt @@ -0,0 +1,29 @@ +Running MONOLITHIC SMC +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario SX +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[PID=1889068] Starting scenario SX +[PID=1889068] Finished scenario SX +All processes finished naturally (elapsed: 980.50s) +[INFO] Scenario SX completed successfully with 1000 episodes. + +=== Monolithic SMC Results === +SX: rho = 0.9900 ± 0.0429 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "SX" --expert --time_budget N/A --save_dir "storage/fixed_error_monolithic_SX" +============================================================ + +real 16m26.267s +user 16m4.159s +sys 0m7.722s diff --git a/examples/compositional_analysis/storage/results/fixed_error_monolithic_XSOC.txt b/examples/compositional_analysis/storage/results/fixed_error_monolithic_XSOC.txt new file mode 100644 index 0000000..be3bcf4 --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_error_monolithic_XSOC.txt @@ -0,0 +1,29 @@ +Running MONOLITHIC SMC +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario XSOC +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[PID=1889092] Starting scenario XSOC +[PID=1889092] Finished scenario XSOC +All processes finished naturally (elapsed: 2393.20s) +[INFO] Scenario XSOC completed successfully with 1000 episodes. 
+ +=== Monolithic SMC Results === +XSOC: rho = 0.7050 ± 0.0429 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "XSOC" --expert --time_budget N/A --save_dir "storage/fixed_error_monolithic_XSOC" +============================================================ + +real 39m59.756s +user 39m13.651s +sys 0m18.494s diff --git a/examples/compositional_analysis/storage/results/fixed_time_compositional_COS.txt b/examples/compositional_analysis/storage/results/fixed_time_compositional_COS.txt new file mode 100644 index 0000000..538ecbc --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_time_compositional_COS.txt @@ -0,0 +1,56 @@ +Running COMPOSITIONAL SMC on primitive cases (what compositional will use) +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario S +Launching scenario O +Launching scenario C +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. + +[HARD STOP] Time budget (120s) reached at 120.10s +Terminating all running processes... +Terminating scenario S (PID=1889117) +Terminating scenario O (PID=1889118) +Terminating scenario C (PID=1889119) +[INFO] Scenario S has 90 completed episodes. +[INFO] Scenario O: Removing partial episode (had 51 episodes, keeping 50) +[INFO] Scenario O has 50 completed episodes. +[INFO] Scenario C: Removing partial episode (had 48 episodes, keeping 47) +[INFO] Scenario C has 47 completed episodes. 
+ +=== Running Compositional SMC === +Estimated COS: rho = 0.6896 ± 0.2796 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "COS" --compositional --expert --time_budget 120 --save_dir "storage/fixed_time_compositional_COS" +============================================================ + +real 2m8.291s +user 5m46.844s +sys 0m17.067s diff --git a/examples/compositional_analysis/storage/results/fixed_time_compositional_SOCXS.txt b/examples/compositional_analysis/storage/results/fixed_time_compositional_SOCXS.txt new file mode 100644 index 0000000..c41aa8f --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_time_compositional_SOCXS.txt @@ -0,0 +1,69 @@ +Running COMPOSITIONAL SMC on primitive cases (what compositional will use) +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario C +Launching scenario X +Launching scenario S +Launching scenario O +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. + +[HARD STOP] Time budget (120s) reached at 120.09s +Terminating all running processes... +Terminating scenario C (PID=1889073) +Terminating scenario X (PID=1889074) +Terminating scenario S (PID=1889075) +Terminating scenario O (PID=1889076) +[INFO] Scenario C has 60 completed episodes. +[INFO] Scenario X: Removing partial episode (had 75 episodes, keeping 74) +[INFO] Scenario X has 74 completed episodes. +[INFO] Scenario S: Removing partial episode (had 107 episodes, keeping 105) +[INFO] Scenario S has 105 completed episodes. +[INFO] Scenario O: Removing partial episode (had 59 episodes, keeping 57) +[INFO] Scenario O has 57 completed episodes. 
+ +=== Running Compositional SMC === +Estimated SOCXS: rho = 0.6843 ± 0.3473 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "SOCXS" --compositional --expert --time_budget 120 --save_dir "storage/fixed_time_compositional_SOCXS" +============================================================ + +real 2m8.564s +user 8m5.151s +sys 0m14.412s diff --git a/examples/compositional_analysis/storage/results/fixed_time_compositional_SX.txt b/examples/compositional_analysis/storage/results/fixed_time_compositional_SX.txt new file mode 100644 index 0000000..144005c --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_time_compositional_SX.txt @@ -0,0 +1,43 @@ +Running COMPOSITIONAL SMC on primitive cases (what compositional will use) +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario X +Launching scenario S +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. + +[HARD STOP] Time budget (120s) reached at 120.01s +Terminating all running processes... +Terminating scenario X (PID=1889070) +Terminating scenario S (PID=1889071) +[INFO] Scenario X has 77 completed episodes. +[INFO] Scenario S: Removing partial episode (had 105 episodes, keeping 104) +[INFO] Scenario S has 104 completed episodes. 
+ +=== Running Compositional SMC === +Estimated SX: rho = 0.8978 ± 0.2126 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "SX" --compositional --expert --time_budget 120 --save_dir "storage/fixed_time_compositional_SX" +============================================================ + +real 2m6.924s +user 4m10.098s +sys 0m9.144s diff --git a/examples/compositional_analysis/storage/results/fixed_time_compositional_XSOC.txt b/examples/compositional_analysis/storage/results/fixed_time_compositional_XSOC.txt new file mode 100644 index 0000000..682887d --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_time_compositional_XSOC.txt @@ -0,0 +1,68 @@ +Running COMPOSITIONAL SMC on primitive cases (what compositional will use) +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario O +Launching scenario C +Launching scenario X +Launching scenario S +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. +[INFO] Use Torch PPO expert. + +[HARD STOP] Time budget (120s) reached at 120.09s +Terminating all running processes... +Terminating scenario O (PID=1889078) +Terminating scenario C (PID=1889079) +Terminating scenario X (PID=1889080) +Terminating scenario S (PID=1889081) +[INFO] Scenario O has 49 completed episodes. +[INFO] Scenario C has 54 completed episodes. +[INFO] Scenario X: Removing partial episode (had 69 episodes, keeping 68) +[INFO] Scenario X has 68 completed episodes. +[INFO] Scenario S: Removing partial episode (had 102 episodes, keeping 99) +[INFO] Scenario S has 99 completed episodes. 
+ +=== Running Compositional SMC === +Estimated XSOC: rho = 0.7358 ± 0.3406 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "XSOC" --compositional --expert --time_budget 120 --save_dir "storage/fixed_time_compositional_XSOC" +============================================================ + +real 2m8.241s +user 7m27.822s +sys 0m15.124s diff --git a/examples/compositional_analysis/storage/results/fixed_time_monolithic_COS.txt b/examples/compositional_analysis/storage/results/fixed_time_monolithic_COS.txt new file mode 100644 index 0000000..292933c --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_time_monolithic_COS.txt @@ -0,0 +1,30 @@ +Running MONOLITHIC SMC +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario COS +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. + +[HARD STOP] Time budget (120s) reached at 120.08s +Terminating all running processes... +Terminating scenario COS (PID=1889114) +[INFO] Scenario COS has 20 completed episodes. + +=== Monolithic SMC Results === +COS: rho = 0.8500 ± 0.3037 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "COS" --expert --time_budget 120 --save_dir "storage/fixed_time_monolithic_COS" +============================================================ + +real 2m6.928s +user 1m51.736s +sys 0m6.374s diff --git a/examples/compositional_analysis/storage/results/fixed_time_monolithic_SOCXS.txt b/examples/compositional_analysis/storage/results/fixed_time_monolithic_SOCXS.txt new file mode 100644 index 0000000..4374a4b --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_time_monolithic_SOCXS.txt @@ -0,0 +1,30 @@ +Running MONOLITHIC SMC +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario SOCXS +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. + +[HARD STOP] Time budget (120s) reached at 120.08s +Terminating all running processes... +Terminating scenario SOCXS (PID=1889085) +[INFO] Scenario SOCXS has 16 completed episodes. 
+ +=== Monolithic SMC Results === +SOCXS: rho = 0.8125 ± 0.3395 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "SOCXS" --expert --time_budget 120 --save_dir "storage/fixed_time_monolithic_SOCXS" +============================================================ + +real 2m6.443s +user 1m55.832s +sys 0m4.647s diff --git a/examples/compositional_analysis/storage/results/fixed_time_monolithic_SX.txt b/examples/compositional_analysis/storage/results/fixed_time_monolithic_SX.txt new file mode 100644 index 0000000..7895b3a --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_time_monolithic_SX.txt @@ -0,0 +1,30 @@ +Running MONOLITHIC SMC +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario SX +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. + +[HARD STOP] Time budget (120s) reached at 120.09s +Terminating all running processes... +Terminating scenario SX (PID=1889064) +[INFO] Scenario SX has 52 completed episodes. + +=== Monolithic SMC Results === +SX: rho = 1.0000 ± 0.1883 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "SX" --expert --time_budget 120 --save_dir "storage/fixed_time_monolithic_SX" +============================================================ + +real 2m5.595s +user 1m57.325s +sys 0m3.691s diff --git a/examples/compositional_analysis/storage/results/fixed_time_monolithic_XSOC.txt b/examples/compositional_analysis/storage/results/fixed_time_monolithic_XSOC.txt new file mode 100644 index 0000000..933e6bd --- /dev/null +++ b/examples/compositional_analysis/storage/results/fixed_time_monolithic_XSOC.txt @@ -0,0 +1,30 @@ +Running MONOLITHIC SMC +=== Generating Traces (Parallel - HARD STOP) === +Launching scenario XSOC +[INFO] Environment: MetaDriveEnv +[INFO] MetaDrive version: 0.4.3 +[INFO] Sensors: [lidar: Lidar(), side_detector: SideDetector(), lane_line_detector: LaneLineDetector()] +[INFO] Render Mode: none +[INFO] Horizon (Max steps per agent): 2000 +[INFO] Assets version: 0.4.3 +[INFO] Known Pipes: glxGraphicsPipe +[INFO] Start Scenario Index: 1000, Num Scenarios : 1000 +[INFO] Use Torch PPO expert. + +[HARD STOP] Time budget (120s) reached at 120.08s +Terminating all running processes... +Terminating scenario XSOC (PID=1889066) +[INFO] Scenario XSOC has 22 completed episodes. 
+ +=== Monolithic SMC Results === +XSOC: rho = 0.7727 ± 0.2895 + +============================================================ +SUMMARY +============================================================ +Command: python compare_analysis.py --scenario "XSOC" --expert --time_budget 120 --save_dir "storage/fixed_time_monolithic_XSOC" +============================================================ + +real 2m5.619s +user 2m0.372s +sys 0m3.612s diff --git a/examples/compositional_analysis/test.py b/examples/compositional_analysis/test.py new file mode 100644 index 0000000..eb5382e --- /dev/null +++ b/examples/compositional_analysis/test.py @@ -0,0 +1,167 @@ +import warnings +warnings.filterwarnings("ignore", message="pkg_resources is deprecated") + +import os +import csv +import argparse +import numpy as np +from train import make_env +from functools import partial +import matplotlib.pyplot as plt +from stable_baselines3 import PPO +from metadrive.envs import MetaDriveEnv +from IPython.display import Image, clear_output +from metadrive.utils.doc_utils import generate_gif +from metadrive.component.map.base_map import BaseMap +from metadrive.policy.expert_policy import ExpertPolicy +from stable_baselines3.common.utils import set_random_seed +from metadrive.component.map.pg_map import MapGenerateMethod +from metadrive.utils.draw_top_down_map import draw_top_down_map +from stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Test policy in MetaDrive") + parser.add_argument( + "--seed", + type=int, + default=0, + help="Random seed for reproducibility") + parser.add_argument( + "--save-dir", + type=str, + default="storage", + help="Directory to save the trained model") + parser.add_argument( + "--model", + type=str, + help="Saved model zip") + parser.add_argument( + "--n", + type=int, + default=10, + help="Number of test samples") + parser.add_argument( + "--scenario", + type=str, + default="XX", + help="Scenario string") + parser.add_argument( + "--gif", + action="store_true", + help="Generate gifs" + ) + args = parser.parse_args() + + # while True: + # env=make_env(scenario=args.scenario, monitor=False) + # env.reset() + # ret = draw_top_down_map(env.current_map) + # # ret = env.render(mode="topdown", window=False) + # # ret = env.render(mode="topdown", + # # window=False, + # # # screen_size=(600, 600), + # # # camera_position=(50, 50) + # # ) + # env.close() + # plt.axis("off") + # plt.imshow(ret) + # plt.show() + # clear_output() + + set_random_seed(args.seed) + + scenario = int(args.scenario) if args.scenario.isdigit() else args.scenario + env = make_env(scenario=scenario, monitor=False) + + + all_traces = [] + trace_id = 0 + + if not args.gif: + csv_path = os.path.join(args.save_dir, "traces.csv") + if os.path.exists(csv_path): + os.remove(csv_path) + f = open(csv_path, "w", newline="") + writer = csv.DictWriter(f, fieldnames=["trace_id", "step", "x", "y", "heading", + "speed", "action", "reward", "label"]) + writer.writeheader() + + success_count = 0 + + for ep in range(args.n): + obs, _ = env.reset() + + if args.model: + policy = PPO.load(args.model) + else: + policy = ExpertPolicy(env.agent) + assert policy is not None + + initial_speed = np.random.uniform(low=70/3.6, high=80/3.6) + initial_velocity = env.vehicle.lane.direction * initial_speed + env.vehicle.set_velocity(initial_velocity) + + done = False + total_reward = 0.0 + step = 0 + label = False + + print(f"\n=== Episode {ep+1}/{args.n} ===") + 
while not done and step <= env.config.horizon: + if isinstance(policy, ExpertPolicy): + action = policy.act() + else: + action, _ = policy.predict(obs, deterministic=True) + obs, reward, done, truncated, info = env.step(action) + total_reward += reward + label = not done or info.get("arrive_dest") + + if args.gif: + env.render( + mode="topdown", + screen_record=True, + window=False + ) + else: + agent = env.agent + pos = agent.position + heading = agent.heading_theta + vel = agent.speed + + row = { + "trace_id": trace_id, + "step": step, + "x": pos[0], + "y": pos[1], + "heading": heading, + "speed": vel, + "action": action.tolist() if hasattr(action, "tolist") else action, + "reward": reward, + "label" : label + } + + writer.writerow(row) + + step += 1 + + print(f"Label: {label}") + print(f"Episode reward: {total_reward:.2f}") + + if label: + success_count += 1 + + if args.gif: + gif_path = os.path.join(args.save_dir, f"trace_{trace_id:03d}.gif") + env.top_down_renderer.generate_gif(gif_path) + print(f"Saved gif to {gif_path}") + + trace_id += 1 + + if not args.gif: + f.close() + + env.close() + + print(f"\n\nEmpirical success probability of generated traces: {success_count/args.n}") + diff --git a/examples/compositional_analysis/train.py b/examples/compositional_analysis/train.py new file mode 100644 index 0000000..bdedf3b --- /dev/null +++ b/examples/compositional_analysis/train.py @@ -0,0 +1,107 @@ +import warnings +warnings.filterwarnings("ignore", message="pkg_resources is deprecated") + +import os +import argparse +import gymnasium as gym +from functools import partial +import matplotlib.pyplot as plt +from stable_baselines3 import PPO +from metadrive.envs import MetaDriveEnv +from IPython.display import Image, clear_output +from metadrive.utils.doc_utils import generate_gif +from metadrive.component.map.base_map import BaseMap +from stable_baselines3.common.monitor import Monitor +from stable_baselines3.common.utils import set_random_seed +from metadrive.component.map.pg_map import MapGenerateMethod +from stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv +from metadrive.utils.draw_top_down_map import draw_top_down_map + + +def make_env(scenario, monitor=False): + config = MetaDriveEnv.default_config() + config.map = scenario + config.discrete_action=False + config.horizon=2000 + config.num_scenarios=1000 + config.start_seed=1000 + config.traffic_density=0.05 + config.need_inverse_traffic=True + config.accident_prob=0.0 + config.random_lane_width=False + config.random_agent_model=False + config.random_lane_num=False + if monitor: + return Monitor(MetaDriveEnv(config)) + else: + return MetaDriveEnv(config) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Train policy in MetaDrive") + parser.add_argument( + "--seed", + type=int, + default=0, + help="Random seed for reproducibility") + parser.add_argument( + "--save-dir", + type=str, + default="storage", + help="Directory to save the trained model") + parser.add_argument( + "--model", + type=str, + default=None, + help="Model zip name" + ) + parser.add_argument( + "--n-envs", + type=int, + default=16, + help="Number of parallel environments") + parser.add_argument( + "--timesteps", + type=int, + default=1_000_000, + help="Number of environment steps") + parser.add_argument( + "--scenario", + type=str, + default="2", + help="Scenario string") + args = parser.parse_args() + + # while True: + # env=make_env(monitor=False) + # env.reset() + # ret = draw_top_down_map(env.current_map) + # # ret 
diff --git a/examples/compositional_analysis/train.py b/examples/compositional_analysis/train.py
new file mode 100644
index 0000000..bdedf3b
--- /dev/null
+++ b/examples/compositional_analysis/train.py
@@ -0,0 +1,107 @@
+import warnings
+warnings.filterwarnings("ignore", message="pkg_resources is deprecated")
+
+import os
+import argparse
+import gymnasium as gym
+from functools import partial
+import matplotlib.pyplot as plt
+from stable_baselines3 import PPO
+from metadrive.envs import MetaDriveEnv
+from IPython.display import Image, clear_output
+from metadrive.utils.doc_utils import generate_gif
+from metadrive.component.map.base_map import BaseMap
+from stable_baselines3.common.monitor import Monitor
+from stable_baselines3.common.utils import set_random_seed
+from metadrive.component.map.pg_map import MapGenerateMethod
+from stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv
+from metadrive.utils.draw_top_down_map import draw_top_down_map
+
+
+def make_env(scenario, monitor=False):
+    config = MetaDriveEnv.default_config()
+    config.map = scenario
+    config.discrete_action = False
+    config.horizon = 2000
+    config.num_scenarios = 1000
+    config.start_seed = 1000
+    config.traffic_density = 0.05
+    config.need_inverse_traffic = True
+    config.accident_prob = 0.0
+    config.random_lane_width = False
+    config.random_agent_model = False
+    config.random_lane_num = False
+    if monitor:
+        return Monitor(MetaDriveEnv(config))
+    else:
+        return MetaDriveEnv(config)
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Train policy in MetaDrive")
+    parser.add_argument(
+        "--seed",
+        type=int,
+        default=0,
+        help="Random seed for reproducibility")
+    parser.add_argument(
+        "--save-dir",
+        type=str,
+        default="storage",
+        help="Directory to save the trained model")
+    parser.add_argument(
+        "--model",
+        type=str,
+        default=None,
+        help="Model zip name"
+    )
+    parser.add_argument(
+        "--n-envs",
+        type=int,
+        default=16,
+        help="Number of parallel environments")
+    parser.add_argument(
+        "--timesteps",
+        type=int,
+        default=1_000_000,
+        help="Number of environment steps")
+    parser.add_argument(
+        "--scenario",
+        type=str,
+        default="2",
+        help="Scenario string")
+    args = parser.parse_args()
+
+    # while True:
+    #     env = make_env(args.scenario, monitor=False)
+    #     env.reset()
+    #     ret = draw_top_down_map(env.current_map)
+    #     # ret = env.render(mode="topdown", window=False)
+    #     # ret = env.render(mode="topdown",
+    #     #                  window=False,
+    #     #                  # screen_size=(600, 600),
+    #     #                  # camera_position=(50, 50)
+    #     #                  )
+    #     env.close()
+    #     plt.axis("off")
+    #     plt.imshow(ret)
+    #     plt.show()
+    #     clear_output()
+
+    set_random_seed(args.seed)
+
+    scenario = int(args.scenario) if args.scenario.isdigit() else args.scenario
+    env = SubprocVecEnv([partial(make_env, scenario, True) for _ in range(args.n_envs)])
+
+    model = PPO("MlpPolicy", env=env, n_steps=4096, verbose=1)
+    model.learn(total_timesteps=args.timesteps, log_interval=1)
+    env.close()
+    clear_output()
+
+    if args.model is None:
+        arg_str = "_".join(f"{k}={v}" for k, v in vars(args).items() if k != "model")
+        safe_arg_str = arg_str.replace("/", "_").replace(" ", "_")
+        args.model = os.path.join(args.save_dir, f"model_{safe_arg_str}.zip")
+
+    model.save(args.model)
+    print("Training is finished.")
+
+ """ + + if not expert: + assert model_path is not None, "You must provide a valid model_path (.zip file)" + + set_random_seed(seed) + + scenario_id = int(scenario) if str(scenario).isdigit() else scenario + env = make_env(scenario=scenario_id, monitor=False) + + if expert: + # print("USING EXPERT POLICY") + from metadrive.policy.expert_policy import ExpertPolicy + # Expert policy will be created per episode after reset + model = None + use_expert = True + else: + model = PPO.load(model_path) + use_expert = False + + all_traces = [] + trace_id = 0 + + # Create save dir + os.makedirs(save_dir, exist_ok=True) + + if not gif: + csv_path = os.path.join(save_dir, scenario, "traces.csv") + os.makedirs(os.path.dirname(csv_path), exist_ok=True) + f = open(csv_path, "w", newline="") + + writer = csv.DictWriter( + f, + fieldnames=[ + "trace_id", "step", "x", "y", "heading", + "speed", "action", "reward", "label" + ] + ) + writer.writeheader() + + for ep in range(n): + obs, _ = env.reset() + + # Create expert policy for this episode (needs current vehicle) + if use_expert: + expert_policy = ExpertPolicy(env.agent) + + initial_speed = np.random.uniform(low=70/3.6, high=80/3.6) + initial_velocity = env.agent.lane.direction * initial_speed + env.agent.set_velocity(initial_velocity) + + done = False + total_reward = 0.0 + step = 0 + label = False + + while not done and step <= env.config.horizon: + if use_expert: + action = expert_policy.act() + else: + action, _states = model.predict(obs, deterministic=True) + + obs, reward, done, truncated, info = env.step(action) + total_reward += reward + label = not done or info.get("arrive_dest") + + if gif: + env.render(mode="topdown", screen_record=True, window=False) + else: + agent = env.agent + pos = agent.position + heading = agent.heading_theta + vel = agent.speed + + row = { + "trace_id": trace_id, + "step": step, + "x": pos[0], + "y": pos[1], + "heading": heading, + "speed": vel, + "action": action.tolist() if hasattr(action, "tolist") else action, + "reward": reward, + "label": label + } + writer.writerow(row) + + step += 1 + + if gif: + gif_path = os.path.join(save_dir, f"trace_{trace_id:03d}.gif") + env.top_down_renderer.generate_gif(gif_path) + print(f"Saved gif to {gif_path}") + + trace_id += 1 + + if not gif: + f.close() + + env.close() + return + diff --git a/src/verifai/compositional_analysis.py b/src/verifai/compositional_analysis.py new file mode 100644 index 0000000..93f0d46 --- /dev/null +++ b/src/verifai/compositional_analysis.py @@ -0,0 +1,331 @@ +from dataclasses import dataclass +from pathlib import Path +from typing import Dict, List, Optional, Tuple, Sequence, Union + +import numpy as np +import pandas as pd +from scipy.stats import gaussian_kde + + +@dataclass +class ScenarioStats: + rho: float + uncertainty: float + + +class ScenarioBase: + """ + Handles loading and basic statistics of scenario trace data. + Computes empirical success probabilities and uncertainty. 
+ """ + + REQUIRED_COLUMNS = {"trace_id", "step", "label"} + + def __init__(self, logbase: Dict[str, str], delta: float = 0.05): + """ + Args: + logbase: Dict mapping scenario names to CSV file paths + delta: Confidence level for Hoeffding bound (default 0.05 → 95% CI) + """ + self.logbase = logbase + self.delta = delta + self.data: Dict[str, pd.DataFrame] = {} + + # Load CSVs + for name, path in logbase.items(): + path_obj = Path(path) + if not path_obj.exists(): + raise FileNotFoundError(f"CSV file for scenario '{name}' not found: {path}") + df = pd.read_csv(path) + missing = self.REQUIRED_COLUMNS - set(df.columns) + if missing: + raise ValueError(f"CSV for scenario '{name}' missing columns: {missing}") + df["trace_id"] = df["trace_id"].astype(str) + self.data[name] = df + + self.success_stats: Dict[str, ScenarioStats] = {} + self._compute_success_stats() + + def _compute_success_stats(self): + for name, df in self.data.items(): + last_steps = df.sort_values("step").groupby("trace_id").tail(1) + labels = last_steps["label"].astype(float).to_numpy() + rho = labels.mean() if len(labels) > 0 else 0.0 + epsilon = np.sqrt(np.log(2 / self.delta) / (2 * len(labels))) if len(labels) > 0 else 0.0 + self.success_stats[name] = ScenarioStats(rho=rho, uncertainty=epsilon) + + def get_success_prob(self, scenario: str) -> float: + return self.success_stats[scenario].rho + + def get_success_prob_uncertainty(self, scenario: str) -> float: + return self.success_stats[scenario].uncertainty + + +class CompositionalAnalysisEngine: + """ + Computes importance-sampled success probabilities across sequential + scenarios using Gaussian KDE and uncertainty propagation. + """ + + def __init__(self, scenario_base: ScenarioBase): + self.scenario_base = scenario_base + + @staticmethod + def _normalize_features(features: np.ndarray) -> np.ndarray: + """Standardize features along each column (mean=0, std=1).""" + mean = np.mean(features, axis=0) + std = np.std(features, axis=0) + std[std == 0] = 1.0 # Avoid division by zero + return (features - mean) / std + + def check( + self, + scenario: List[str], + features: Optional[List[str]] = None, + center_feat_idx: Optional[List[int]] = None, + bw_method: str | int = 10, + ) -> Tuple[float, float]: + """ + Computes importance-sampled success probability and propagated uncertainty. 
+ + Args: + scenario: Ordered list of scenario names + features: Optional list of features to include in KDE + norm_feat_idx: Optional indices of features to normalize + + Returns: + Tuple of (rho_estimate, uncertainty) + """ + if len(scenario) == 0: + raise ValueError("Scenario list must contain at least one scenario.") + + n = len(scenario) + if n == 1: + result = self.scenario_base.success_stats[scenario] + return result.rho, result.uncertainty + + first_scenario_result = self.scenario_base.success_stats[scenario[0]] + + rho = first_scenario_result.rho + eps_rho_ratios = [first_scenario_result.uncertainty/rho] + + delta = self.scenario_base.delta + per_step_delta = delta / n # union bound + + for i in range(len(scenario) - 1): + s_name, t_name = scenario[i], scenario[i+1] + df_s, df_t = self.scenario_base.data[s_name], self.scenario_base.data[t_name] + + # Select successful endpoints + s_last = df_s.sort_values("step").groupby("trace_id").tail(1) + s_last = s_last[s_last["label"] == True] + t_first = df_t.sort_values("step").groupby("trace_id").head(1) + t_last = df_t.sort_values("step").groupby("trace_id").tail(1) + + # KDE features + if features: + s_last_features = s_last[features].to_numpy() + t_first_features = t_first[features].to_numpy() + if s_last_features.shape[0] < 2 or t_first_features.shape[0] < 2: + return 0.0, 0.0 + if center_feat_idx: + for j in center_feat_idx: + s_last_features[:, j] = s_last_features[:, j] - np.mean(s_last_features[:, j]) # center + t_first_features[:, j] = t_first_features[:, j] - np.mean(t_first_features[:, j]) # center + else: + raise ValueError("Feature list must be provided for KDE.") + + s_last_features, t_first_features = s_last_features.T, t_first_features.T + + kde_s_last = gaussian_kde(s_last_features, bw_method=bw_method) + kde_t_first = gaussian_kde(t_first_features, bw_method=bw_method) + + p_vals = kde_s_last(t_first_features) + q_vals = kde_t_first(t_first_features) + + weights = np.nan_to_num(p_vals / q_vals, nan=0.0, posinf=0.0, neginf=0.0) + + labels_t_last = t_last["label"].astype(float).to_numpy() + + rho_step = np.sum(weights * labels_t_last) / np.sum(weights) # P(T | S) + rho *= rho_step # P(T | S) * P(S) = P(S, T) + + N_eff = np.sum(weights)**2 / np.sum(weights**2) + epsilon_i = np.sqrt(np.log(2 / per_step_delta) / (2 * N_eff)) + eps_rho_ratios.append(epsilon_i / rho_step) + + # Multiplicative error + uncertainty = rho * np.sqrt(np.sum([eps_rho_ratios**2 for eps_rho_ratios in eps_rho_ratios])) + + return rho, uncertainty + + def falsify( + self, + scenario: Union[str, Sequence[str]], + features: Optional[List[str]] = None, + center_feat_idx: Optional[List[int]] = None, + align_feat_idx: Optional[List[int]] = None, + bw_method: str | int = 10, + ) -> Tuple[Optional[pd.DataFrame], float]: + """ + Generates a counterexample trace using the given traces. 
+ + Args: + scenario: Ordered list of scenario names + features: Optional list of features to include in KDE + norm_feat_idx: Optional indices of features to normalize + align_feat_idx: Optional indices of features to align + + Returns: + Trace + """ + if len(scenario) == 0: + raise ValueError("Scenario list must contain at least one scenario.") + + cex = None + n = len(scenario) + + if n == 1: + t_name = scenario[0] + df_t = self.scenario_base.data[t_name] + + t_traces = df_t.sort_values("step").groupby("trace_id") + t_first = t_traces.head(1).sort_values("trace_id") + t_last = t_traces.tail(1).sort_values("trace_id") + + fail_idx = (t_last["label"] == False).to_numpy() + t_first = t_first[fail_idx].sort_values("trace_id") + t_last = t_last[fail_idx].sort_values("trace_id") + + if t_first.empty or t_last.empty: + return None + + t_last_features = t_last[features].to_numpy() + if t_last_features.shape[0] < 1: + return None + elif t_last_features.shape[0] <= t_last_features.shape[1]: + random_idx = np.random.randint(t_last_features.shape[0]) + t_trace_id = t_last.iloc[random_idx]["trace_id"] + t_trace = t_traces.get_group(t_trace_id) + return t_trace + + kde_t_last = gaussian_kde(t_last_features.T, bw_method=bw_method) + t_last_prob = kde_t_last(t_last_features.T) + t_idx = np.argmax(t_last_prob) + t_trace_id = t_first.iloc[t_idx]["trace_id"] + t_trace = t_traces.get_group(t_trace_id) + return t_trace + + for i in reversed(range(n - 1)): + s_name, t_name = scenario[i], scenario[i+1] + df_s, df_t = self.scenario_base.data[s_name], self.scenario_base.data[t_name] + + s_traces = df_s.sort_values("step").groupby("trace_id") + s_last = s_traces.tail(1).sort_values("trace_id") + s_last = s_last[s_last["label"] == True].sort_values("trace_id") + + if cex is None: + t_traces = df_t.sort_values("step").groupby("trace_id") + t_first = t_traces.head(1).sort_values("trace_id") + t_last = t_traces.tail(1).sort_values("trace_id") + + fail_idx = (t_last["label"] == False).to_numpy() + t_first = t_first[fail_idx].sort_values("trace_id") + t_last = t_last[fail_idx].sort_values("trace_id") + + if t_first.empty or t_last.empty: + continue + + # KDE features + if features: + s_last_features = s_last[features].to_numpy() + if s_last_features.shape[0] < 2: + continue + if cex is None: + t_first_features = t_first[features].to_numpy() + if t_first_features.shape[0] < 2: + continue + if center_feat_idx: + for j in center_feat_idx: + s_last_features[:, j] = s_last_features[:, j] - np.mean(s_last_features[:, j]) + if cex is None: + t_first_features[:, j] = t_first_features[:, j] - np.mean(t_first_features[:, j]) + else: + raise ValueError("Feature list must be provided for KDE.") + + if cex is None: + if t_first_features.shape[0] <= t_first_features.shape[1]: + random_idx = np.random.randint(t_first_features.shape[0]) + t_trace_id = t_first.iloc[random_idx]["trace_id"] + t_trace = t_traces.get_group(t_trace_id) + + # compute Euclidean distances + diffs = s_last_features - t_first_features[random_idx].reshape(1, -1) + dists = np.linalg.norm(diffs, axis=1) + + # choose the s trace with minimum distance + s_idx = int(np.argmin(dists)) + s_trace_id = s_last.iloc[s_idx]["trace_id"] + s_trace = s_traces.get_group(s_trace_id) + + else: + kde_s_last = gaussian_kde(s_last_features.T, bw_method=bw_method) + kde_t_first = gaussian_kde(t_first_features.T, bw_method=bw_method) + + s_last_prob = kde_t_first(s_last_features.T) + t_first_prob = kde_s_last(t_first_features.T) + + s_idx = np.argmax(s_last_prob) + t_idx = 
np.argmax(t_first_prob) + + s_trace_id = s_last.iloc[s_idx]["trace_id"] + t_trace_id = t_first.iloc[t_idx]["trace_id"] + + s_trace = s_traces.get_group(s_trace_id) + t_trace = t_traces.get_group(t_trace_id) + + if align_feat_idx: + for idx in align_feat_idx: + s_feat = s_trace[features[idx]] + t_feat = t_trace[features[idx]] + offset = s_feat.iloc[-1] - t_feat.iloc[0] + t_trace.loc[:, features[idx]] = t_feat + offset + + cex = t_trace + + else: + if align_feat_idx: + compare_idx = align_feat_idx + else: + compare_idx = list(range(len(features))) + + # build arrays + # s_last_features rows correspond to s_last (they were computed above) + s_feat_mat = s_last_features[:, compare_idx] # shape (num_s_last, k) + cex_first = cex[features].iloc[0].to_numpy()[compare_idx] # shape (k,) + + # compute Euclidean distances + diffs = s_feat_mat - cex_first.reshape(1, -1) + dists = np.linalg.norm(diffs, axis=1) + + # choose the s trace with minimum distance + s_idx = int(np.argmin(dists)) + s_trace_id = s_last.iloc[s_idx]["trace_id"] + s_trace = s_traces.get_group(s_trace_id) + + # Align cex to s_trace if align_feat_idx provided + if align_feat_idx: + for idx in align_feat_idx: + s_feat = s_trace[features[idx]] + cex_feat = cex[features[idx]] + offset = s_feat.iloc[-1] - cex_feat.iloc[0] + cex.loc[:, features[idx]] = cex_feat + offset + + cex = pd.concat([s_trace, cex]) + + if cex is None: + return None + + final_features = [feat for feat in features] + ["label"] + return cex[final_features].reset_index(drop=True) +
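For intuition about the per-step quantities that CompositionalAnalysisEngine.check computes, the importance weights, effective sample size, and Hoeffding-style half-width can be reproduced on synthetic one-dimensional data. This is an illustrative sketch only; the two Gaussians are arbitrary stand-ins for the exit-state and entry-state distributions that the KDEs estimate from real traces.

import numpy as np
from scipy.stats import gaussian_kde

rng = np.random.default_rng(0)
s_exit = rng.normal(0.0, 1.0, 500)     # stand-in for exit states of the first scenario's successful traces
t_entry = rng.normal(0.5, 1.2, 500)    # stand-in for entry states of the second scenario's traces
t_success = rng.random(500) < 0.8      # synthetic success labels for the second scenario

p = gaussian_kde(s_exit)(t_entry)      # density of exit states, evaluated at the entry states
q = gaussian_kde(t_entry)(t_entry)     # density of the entry states themselves
w = np.nan_to_num(p / q, nan=0.0, posinf=0.0, neginf=0.0)

rho_step = np.sum(w * t_success) / np.sum(w)   # importance-weighted success probability of the step
n_eff = np.sum(w) ** 2 / np.sum(w ** 2)        # effective sample size
eps = np.sqrt(np.log(2 / 0.05) / (2 * n_eff))  # half-width at delta = 0.05
print(f"rho_step = {rho_step:.3f} ± {eps:.3f} (N_eff = {n_eff:.1f})")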