110 changes: 75 additions & 35 deletions core/config_manager/ui_adapter.py
@@ -6,6 +6,7 @@
from typing import Dict, List, Any, Optional
from .config import ConfigManager, DEFAULT_REPORTS_DIR
from core.runner import execute_prompt_tests_with_orchestrator
from core.data_store import model_config_store
from rich.console import Console
from rich.progress import (
Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
@@ -30,62 +31,101 @@ def __init__(self, config_manager: Optional[ConfigManager] = None):
"output_path": {"path": str(DEFAULT_REPORTS_DIR), "filename": "report"}, # Default output path
}

def run_test(self, prompt: str, strategies: List[str], config: Dict[str, Any]) -> Dict[str, Any]:
def run_test(self, config_id: str, prompt_override: Optional[str] = None, strategies_override: Optional[List[str]] = None) -> Dict[str, Any]:
"""
Run tests with UI-specific configuration.
Run tests using a stored configuration profile, with optional overrides.

Args:
prompt: The system prompt to test
strategies: List of test strategies to use

config_id: The ID of the configuration profile to use.
prompt_override: Optional new prompt content to use for this run.
strategies_override: Optional new list of strategies to use for this run.

Returns:
Dictionary containing test results
Dictionary containing test results.

Raises:
ValueError: If required parameters are missing
ValueError: If the configuration profile is not found or required parameters are missing.
"""
if not prompt:
raise ValueError("Prompt is required")
if not strategies:
raise ValueError("At least one strategy is required")
full_runner_config = self.get_profile(config_id)
if not full_runner_config:
raise ValueError(f"Configuration profile with ID '{config_id}' not found.")

# Make a copy to avoid modifying the stored config directly with temporary overrides
test_run_config = full_runner_config.copy()
test_run_config['provider'] = dict(full_runner_config.get('provider') or {}) # Ensure provider dict is also a copy

# Apply overrides if any
if prompt_override is not None:
# Assuming prompt is stored as {'content': '...'}; copy the nested dict before
# mutating so a temporary override does not leak into the stored profile
prompt_cfg = test_run_config.get('prompt')
prompt_cfg = dict(prompt_cfg) if isinstance(prompt_cfg, dict) else {}
prompt_cfg['content'] = prompt_override
test_run_config['prompt'] = prompt_cfg

if strategies_override is not None:
test_run_config['strategies'] = strategies_override

# Create test configuration
api_key_key = f"{config['provider_name'].upper()}_API_KEY"
api_key = os.getenv(api_key_key, 'n/a') or get_key(".env", api_key_key)

test_config = {
"prompt": {"content": prompt},
"strategies": strategies,
"provider": {
"provider_name": f"{config['provider_name']}/{config['model']}",
"model": f"{config['provider_name']}/{config['model']}",
"api_key": api_key,
},
"temperature": self.default_config["temperature"],
"timeout": self.default_config["timeout"],
"max_tokens": self.default_config["max_tokens"],
"output_path": self.default_config["output_path"]
}
test_run_config['output_path'] = self.default_config['output_path']

# Ensure essential keys are present (they should be from the stored config)
if 'prompt' not in test_run_config or (isinstance(test_run_config.get('prompt'), dict) and 'content' not in test_run_config.get('prompt', {})):
if prompt_override is None: # only raise if no override was given
raise ValueError("Prompt content is missing in the configuration and no override provided.")
if 'strategies' not in test_run_config or not test_run_config['strategies']:
if strategies_override is None: # only raise if no override was given
raise ValueError("Strategies are missing in the configuration and no override provided.")
provider_cfg = test_run_config.get('provider') or {}
if 'provider_name' not in provider_cfg or 'model' not in provider_cfg:
raise ValueError("Provider information (provider_name, model) is missing in the configuration.")

console = Console()
console.print(f"[bold cyan]Running test with config: {test_config}[/]")

console.print(f"[bold cyan]Running test with profile ID '{config_id}':[/]")
console.print(f"[bold cyan]Effective config for run: {test_run_config}[/]")

with Progress(
SpinnerColumn(),
TextColumn("[bold blue]{task.description}"),
TimeElapsedColumn(),
) as progress:
task = progress.add_task("[cyan]Testing prompt security", total=None)
report_data = execute_prompt_tests_with_orchestrator(test_config)
# Pass the fully prepared test_run_config
report_data = execute_prompt_tests_with_orchestrator(test_run_config)
progress.update(task, completed=True)

console.print("[bold green]Tests completed successfully![/]")
console.print(f"[bold cyan]Report saved successfully at {report_data['report_metadata']['path']}[/]")
report_file_path = report_data.get('report_metadata', {}).get('path')
if report_file_path:
console.print(f"[bold cyan]Report saved successfully at {report_file_path}[/]")
# Add report to config's past_runs
model_config_store.add_report_to_config(config_id, str(report_file_path))
else:
console.print("[bold yellow]Report path not found in report data. Cannot link to profile.[/]")
console.print("\n")

return report_data

# --- Profile Management Methods ---

def save_profile(self, runner_config_data: Dict[str, Any], profile_name: Optional[str] = None) -> str:
"""Saves a new profile or updates an existing one based on the presence of 'id' in runner_config_data.
Returns the ID of the saved/updated configuration.
"""
# model_config_store.save_config ensures 'id' is present and handles profile_name
model_config_store.save_config(runner_config_data, profile_name=profile_name)
return runner_config_data['id'] # 'id' is guaranteed by save_config

def get_profile(self, config_id: str) -> Optional[Dict[str, Any]]:
"""Retrieves a specific configuration profile by its ID."""
return model_config_store.get_config(config_id)

def list_profiles(self) -> List[Dict[str, Any]]:
"""Lists all saved configuration profiles."""
return model_config_store.list_configs()

def delete_profile(self, config_id: str) -> bool:
"""Deletes a configuration profile by its ID. Returns True if deleted."""
return model_config_store.delete_config(config_id)

# --- Existing Methods for Default/Unsaved Config ---

def update_config(self, config: Dict[str, Any]) -> None:
"""
Update the default configuration.
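
A minimal usage sketch of the new profile-based flow, for reviewers. The adapter class name `UIAdapter` and the example profile values are assumptions (the class declaration is outside this hunk), and running it would invoke a real provider:

    from core.config_manager.ui_adapter import UIAdapter

    adapter = UIAdapter()
    # Save a profile once; save_profile returns the generated config id.
    config_id = adapter.save_profile(
        {
            "prompt": {"content": "You are a helpful assistant."},
            "strategies": ["prompt_injection"],
            "provider": {"provider_name": "openai/gpt-4o", "model": "openai/gpt-4o"},
        },
        profile_name="baseline",
    )
    # Later runs reuse the stored profile; overrides apply to this run only.
    results = adapter.run_test(config_id, prompt_override="You are a terse assistant.")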
Empty file added core/data_store/__init__.py
88 changes: 88 additions & 0 deletions core/data_store/model_config_store.py
@@ -0,0 +1,88 @@
from pathlib import Path
from tinydb import TinyDB, Query
from typing import List, Dict, Any, Optional
import uuid

DB_DIR = Path.home() / ".compliant-llm"
CONFIG_DB_FILE = DB_DIR / "model_config.json"

# Ensure the .compliant-llm directory exists
DB_DIR.mkdir(parents=True, exist_ok=True)

_db_instance = None

def _get_table():
global _db_instance
if _db_instance is None:
_db_instance = TinyDB(CONFIG_DB_FILE)
return _db_instance.table('model_config')

def save_config(runner_config_data: Dict[str, Any], profile_name: Optional[str] = None) -> None:
"""Saves or updates a model configuration profile."""
table = _get_table()

# Ensure 'id' exists, add if not
if 'id' not in runner_config_data:
runner_config_data['id'] = str(uuid.uuid4())

# Ensure 'past_runs' exists if it's a new config or not present
if 'past_runs' not in runner_config_data:
runner_config_data['past_runs'] = []

# Add/update profile_name within the document for easier access if needed
if profile_name is not None:
runner_config_data['profile_name'] = profile_name
document_to_store = runner_config_data

ConfigQuery = Query()
table.upsert(document_to_store, ConfigQuery.id == runner_config_data['id'])
print(f"Config '{profile_name}' saved.")

def get_config(config_id: str) -> Optional[Dict[str, Any]]:
"""Retrieves a specific model configuration profile."""
table = _get_table()
ConfigQuery = Query()
return table.get(ConfigQuery.id == config_id)

def list_configs() -> List[Dict[str, Any]]:
"""Lists all saved model configuration profiles."""
table = _get_table()
return table.all()

def delete_config(config_id: str) -> bool:
"""Deletes a model configuration profile. Returns True if deleted."""
table = _get_table()
ConfigQuery = Query()
deleted_ids = table.remove(ConfigQuery.id == config_id)
return len(deleted_ids) > 0

def add_report_to_config(config_id: str, report_file_path: str) -> bool:
"""Adds a report file path to the 'past_runs' list of a specific config."""
table = _get_table()
ConfigQuery = Query()
config_doc = table.get(ConfigQuery.id == config_id)

if not config_doc:
print(f"Error: Config profile '{config_id}' not found.")
return False

# Ensure past_runs is a list
if 'past_runs' not in config_doc or not isinstance(config_doc['past_runs'], list):
config_doc['past_runs'] = []

# Avoid duplicate entries
if report_file_path not in config_doc['past_runs']:
config_doc['past_runs'].append(report_file_path)
table.upsert(config_doc, ConfigQuery.id == config_id)
print(f"Report '{report_file_path}' added to config '{config_id}'.")
return True
else:
print(f"Report '{report_file_path}' already exists in config '{config_id}'.")
return False

def close_db():
"""Closes the database connection."""
global _db_instance
if _db_instance:
_db_instance.close()
_db_instance = None
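
A short sketch of how these store functions compose (field names follow this module; the example values are hypothetical):

    from core.data_store import model_config_store

    cfg = {
        "prompt": {"content": "You are a helpful assistant."},
        "strategies": ["prompt_injection"],
        "provider": {"provider_name": "openai/gpt-4o", "model": "openai/gpt-4o"},
    }
    model_config_store.save_config(cfg, profile_name="baseline")  # assigns cfg["id"] in place
    assert model_config_store.get_config(cfg["id"]) is not None
    model_config_store.add_report_to_config(cfg["id"], "reports/report_20250101_120000.json")
    model_config_store.close_db()  # release the TinyDB file handle when done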
3 changes: 2 additions & 1 deletion core/reporter.py
@@ -4,7 +4,8 @@
from datetime import datetime


def save_report(report_data, output_path={"path": "reports", "filename": "report"}):

def save_report(report_data, output_path):
# Create directories if they don't exist
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
file_path = f"{output_path['path']}/{output_path['filename']}_{timestamp}.json"
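
With the mutable default argument removed, callers must now pass output_path explicitly; a sketch (the report_data shape is an assumption):

    from core.reporter import save_report

    save_report(
        {"results": []},
        output_path={"path": "reports", "filename": "report"},
    )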
1 change: 1 addition & 0 deletions pyproject.toml
@@ -23,6 +23,7 @@ dependencies = [
"aiofiles",
"plotly",
"psutil",
"tinydb",
"markdown",
"opentelemetry-api",
"opentelemetry-sdk",
1 change: 1 addition & 0 deletions requirements.txt
@@ -9,6 +9,7 @@ aiofiles
plotly
psutil
markdown
tinydb
opentelemetry-api
opentelemetry-sdk
opentelemetry-exporter-otlp