22 changes: 16 additions & 6 deletions cli/commands.py
@@ -14,10 +14,12 @@
from rich.progress import (
Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
)
from typing import Dict, Any, Optional, Callable
from typing import Dict, Any, Callable
from rich import box
from core.runner import execute_prompt_tests, execute_rerun_test
from core.config_manager.cli_adapter import CLIConfigAdapter
from core.analytics.tracker import analytics_tracker
from core.analytics.tracker import UsageEvent, ErrorEvent, InteractionType


def dict_to_cli_table(
@@ -120,6 +122,7 @@ def test(config_path, prompt, strategy, provider, output, report, parallel, verb
cli_adapter = CLIConfigAdapter()

try:
analytics_tracker.track(UsageEvent(name="test", interaction_type=InteractionType.CLI))
# Load configuration from CLI arguments
cli_adapter.load_from_cli(
config_path=config_path,
@@ -160,13 +163,16 @@ def test(config_path, prompt, strategy, provider, output, report, parallel, verb

except FileNotFoundError as e:
click.echo(str(e), err=True)
analytics_tracker.track(ErrorEvent(name="test", interaction_type=InteractionType.CLI, error_msg=str(e)))
sys.exit(1)
except Exception as e:
click.echo(f"Error processing configuration: {e}", err=True)
analytics_tracker.track(ErrorEvent(name="test", interaction_type=InteractionType.CLI, error_msg=str(e)))
sys.exit(1)

# Run the tests with a progress indicator
console.print("\nRunning tests...")

with Progress(
SpinnerColumn(),
TextColumn("[bold blue]{task.description}"),
@@ -176,6 +182,7 @@ def test(config_path, prompt, strategy, provider, output, report, parallel, verb
report_data = execute_prompt_tests(config_dict=runner_config)
progress.update(task, completed=True)


console.print("[bold green]Tests completed successfully![/]")
console.print(f"[bold cyan]Report saved successfully at {report_data['report_metadata']['path']}[/]")
console.print("\n")
@@ -238,6 +245,7 @@ def test(config_path, prompt, strategy, provider, output, report, parallel, verb
@click.option('--summary', '-s', is_flag=True, help='Show only summary statistics')
def report(report_file, format, summary):
"""Analyze and view previous test results. If no file is specified, uses the latest report."""
analytics_tracker.track(UsageEvent(name="report", interaction_type=InteractionType.CLI))
try:
# If no report file is specified, find the latest one
if not report_file:
@@ -298,11 +306,13 @@ def report(report_file, format, summary):
f.write("</body></html>")
click.echo(f"HTML report saved to {html_path}")

except FileNotFoundError:
click.echo(f"Report file not found: {report_file}", err=True)
except FileNotFoundError as e:
analytics_tracker.track(ErrorEvent(name="report", interaction_type=InteractionType.CLI, error_msg=str(e)))
click.echo(f"Error: Report file not found: {e}", err=True)
sys.exit(1)
except json.JSONDecodeError:
click.echo(f"Invalid JSON in report file: {report_file}", err=True)
analytics_tracker.track(ErrorEvent(name="report", interaction_type=InteractionType.CLI, error_msg="Invalid JSON format in report file."))
click.echo("Error: Invalid JSON format in report file.", err=True)
sys.exit(1)


@@ -398,7 +408,6 @@ def generate(type, template, output):
@click.option('--validate', '-v', help='Validate a configuration file')
def config(list, show, validate):
"""Manage configuration files."""

if list:
click.echo("Available configurations:")
# Get the list of config directories from core.config
@@ -447,7 +456,7 @@ def config(list, show, validate):
click.echo("Processed configuration for runner:")
click.echo(yaml.dump(runner_config, default_flow_style=False))
except FileNotFoundError as e:
click.echo(str(e), err=True)
click.echo(f"Error: Configuration file not found: {e}", err=True)
sys.exit(1)
except Exception as e:
click.echo(f"Configuration validation failed: {e}", err=True)
@@ -471,6 +480,7 @@ def rerun(prompt, report_file):
"""
# Create a rich console for showing output
console = Console()
analytics_tracker.track(UsageEvent(name="rerun", interaction_type=InteractionType.CLI))
# Validate inputs
if not prompt:
console.print("[red]Error: System prompt is required.[/red]")
17 changes: 17 additions & 0 deletions core/analytics/client.py
@@ -0,0 +1,17 @@
import os
import uuid

def get_client_id() -> str:
    """Retrieve or create a unique, anonymous client ID for this user."""
    path = os.path.join(os.path.expanduser("~"), ".compliant-llm", ".client-id")
Review comment (Contributor):
os.path.expanduser("~"): this will probably not work across platforms.
please look into Path.home()

Reply (Contributor Author):
Updated. [screenshot: "Screenshot 2025-05-28 at 9 05 47 PM"; a sketch of the suggested variant follows this file]
    os.makedirs(os.path.dirname(path), exist_ok=True)

    if os.path.exists(path):
        with open(path, "r") as f:
            return f.read().strip()

    # Generate and save a new UUID
    new_id = str(uuid.uuid4())
    with open(path, "w") as f:
        f.write(new_id)
    return new_id
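Below is a minimal sketch of the pathlib variant the reviewer asked for. The exact shape of the updated code is an assumption, since the author's reply only attached a screenshot:

import uuid
from pathlib import Path

def get_client_id() -> str:
    """Retrieve or create a unique, anonymous client ID for this user."""
    # Path.home() resolves the home directory portably (Windows, macOS, Linux).
    client_id_path = Path.home() / ".compliant-llm" / ".client-id"
    client_id_path.parent.mkdir(parents=True, exist_ok=True)

    if client_id_path.exists():
        return client_id_path.read_text().strip()

    # Generate and persist a new anonymous UUID on first use
    new_id = str(uuid.uuid4())
    client_id_path.write_text(new_id)
    return new_id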
183 changes: 183 additions & 0 deletions core/analytics/tracker.py
@@ -0,0 +1,183 @@
import os
import logging
import functools
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field

# OpenTelemetry core
from opentelemetry import trace
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
from azure.monitor.opentelemetry.exporter import AzureMonitorTraceExporter, AzureMonitorMetricExporter

# OpenTelemetry metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.metrics import set_meter_provider, get_meter_provider
from opentelemetry.sdk.metrics.export import (
    PeriodicExportingMetricReader,
    ConsoleMetricExporter,
)

# ----------------------------
# Logging
# ----------------------------
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# ----------------------------
# Opt-out utility
# ----------------------------
def is_analytics_enabled() -> bool:
    return os.getenv("DISABLE_COMPLIANT_LLM_TELEMETRY", "false").lower() != "true"

# ----------------------------
# Client ID utility (defined in core/analytics/client.py)
# ----------------------------
from .client import get_client_id

# ----------------------------
# Event Type Enums and Models
# ----------------------------
class EventType(str, Enum):
    USAGE = "usage"
    ERROR = "error"

class InteractionType(str, Enum):
    CLI = "cli"
    DASHBOARD = "dashboard"
    API = "api"
    BATCH = "batch"

class BaseEvent(BaseModel):
    name: str
    interaction_type: InteractionType
    client_id: Optional[str] = Field(default_factory=get_client_id)
    type: EventType

class UsageEvent(BaseEvent):
    type: EventType = EventType.USAGE

class ErrorEvent(BaseEvent):
    error_msg: str
    type: EventType = EventType.ERROR

# ----------------------------
# Tracker Class
# ----------------------------
class AnalyticsTracker:
    def __init__(self):
        self.enabled = is_analytics_enabled()
        if not self.enabled:
            logger.info("🔕 Analytics disabled by user opt-out.")
            self.tracer = None
            self.usage_counter = None
            self.error_counter = None
            return

        instrumentation_key = os.getenv("AZURE_INSTRUMENTATION_KEY", "")
        ingestion_endpoint = os.getenv("AZURE_INGESTION_ENDPOINT", "")

        # Resource
        resource = Resource.create({
            SERVICE_NAME: "compliant-llm",
            "service.version": "1.0.0",
            "environment": os.getenv("ENVIRONMENT", "prod")
        })

        # Initialize tracing
        try:
            trace_exporter = AzureMonitorTraceExporter(
                connection_string=f"InstrumentationKey={instrumentation_key};IngestionEndpoint={ingestion_endpoint}"
            )
            trace.set_tracer_provider(TracerProvider(resource=resource))
            tracer_provider = trace.get_tracer_provider()
            tracer_provider.add_span_processor(BatchSpanProcessor(trace_exporter))
            tracer_provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
            self.tracer = trace.get_tracer("compliant-llm")
            logger.info("✅ Azure Monitor traces initialized.")
        except Exception as e:
            logger.error(f"❌ Failed to initialize tracing: {e}")
            self.tracer = None

        # Initialize metrics
        try:
            metric_exporter = AzureMonitorMetricExporter(
                connection_string=f"InstrumentationKey={instrumentation_key};IngestionEndpoint={ingestion_endpoint}"
            )
            metric_readers = [
                PeriodicExportingMetricReader(ConsoleMetricExporter()),
                PeriodicExportingMetricReader(metric_exporter)
            ]
            meter_provider = MeterProvider(resource=resource, metric_readers=metric_readers)
            set_meter_provider(meter_provider)
            self.meter = get_meter_provider().get_meter("compliant-llm")
            self.usage_counter = self.meter.create_counter(
                name="compliant_llm.command_invocations",
                description="Number of CLI/dashboard/API commands invoked"
            )
            self.error_counter = self.meter.create_counter(
                name="compliant_llm.errors",
                description="Number of errors encountered"
            )
            logger.info("✅ Azure Monitor metrics initialized.")
        except Exception as e:
            logger.error(f"❌ Failed to initialize metrics: {e}")
            self.usage_counter = None
            self.error_counter = None

    def track(self, event: BaseEvent):
        if not self.enabled:
            return  # Opted out

        logger.info(f"TRACKING EVENT: {event.name} ({event.type.value})")

        # --- Tracing (skipped if tracer initialization failed) ---
        if self.tracer:
            try:
                with self.tracer.start_as_current_span(f"{event.type.value}:{event.name}") as span:
                    span.set_attribute("interaction_type", event.interaction_type.value)
                    span.set_attribute("event_type", event.type.value)
                    span.set_attribute("command", event.name)
                    if event.client_id:
                        span.set_attribute("client_id", event.client_id)
                    if isinstance(event, ErrorEvent):
                        span.set_attribute("error_msg", event.error_msg[:100])
            except Exception as e:
                logger.error(f"❌ Error during tracing: {e}")

        # --- Metrics ---
        attributes = {
            "interaction_type": event.interaction_type.value,
            "name": event.name
        }
        if event.client_id:
            attributes["client_id"] = event.client_id
        if isinstance(event, ErrorEvent):
            attributes["error_msg"] = event.error_msg[:100]

        try:
            if event.type == EventType.USAGE and self.usage_counter:
                self.usage_counter.add(1, attributes)
            elif event.type == EventType.ERROR and self.error_counter:
                self.error_counter.add(1, attributes)
        except Exception as e:
            logger.error(f"❌ Error during metrics recording: {e}")

# ----------------------------
# Usage Tracking Decorator
# ----------------------------
def track_usage(name: str, interaction_type: InteractionType = InteractionType.CLI):
    def decorator(func):
        @functools.wraps(func)  # keep the wrapped command's name and docstring
        def wrapper(*args, **kwargs):
            if analytics_tracker.enabled:
                event = UsageEvent(name=name, interaction_type=interaction_type)
                analytics_tracker.track(event)
            return func(*args, **kwargs)
        return wrapper
    return decorator

# ----------------------------
# Global Tracker Instance
# ----------------------------
analytics_tracker = AnalyticsTracker()
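For reference, the track_usage decorator above is defined but not applied anywhere in this diff. The snippet below is a hedged sketch of how it could be used together with the opt-out switch; the plain generate() function is a stand-in, not code from this PR, and it assumes the environment variable is set before the module is first imported (the global tracker is instantiated at import time):

import os
os.environ["DISABLE_COMPLIANT_LLM_TELEMETRY"] = "true"  # opt out before the tracker module loads

from core.analytics.tracker import track_usage, InteractionType

@track_usage("generate", interaction_type=InteractionType.CLI)
def generate():
    """Stand-in command body; the decorator records a UsageEvent before it runs."""
    print("generating...")

generate()  # with the opt-out set, the event is skipped and generate() runs normally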
Binary file added docs/images/controls_tested.png
Binary file added docs/images/nist.png
10 changes: 9 additions & 1 deletion pyproject.toml
@@ -23,7 +23,15 @@ dependencies = [
"aiofiles",
"plotly",
"psutil",
"markdown"
"markdown",
"opentelemetry-api",
"opentelemetry-sdk",
"opentelemetry-exporter-otlp",
"opentelemetry-instrumentation",
"azure-monitor-opentelemetry-exporter",
"azure-core",
"azure-identity",
"azure-monitor-opentelemetry"
]

[project.scripts]
Expand Down
8 changes: 8 additions & 0 deletions requirements-lock.txt
@@ -83,3 +83,11 @@ tzdata==2025.2
urllib3==2.4.0
yarl==1.20.0
zipp==3.21.0
opentelemetry-api
opentelemetry-sdk
opentelemetry-exporter-otlp
opentelemetry-instrumentation
azure-monitor-opentelemetry-exporter
azure-core
azure-identity
azure-monitor-opentelemetry
10 changes: 9 additions & 1 deletion requirements.txt
@@ -8,4 +8,12 @@ pytest
aiofiles
plotly
psutil
markdown
markdown
opentelemetry-api
opentelemetry-sdk
opentelemetry-exporter-otlp
opentelemetry-instrumentation
azure-monitor-opentelemetry-exporter
azure-core
azure-identity
azure-monitor-opentelemetry
13 changes: 13 additions & 0 deletions ui/dashboard.py
@@ -12,8 +12,10 @@
from dotenv import load_dotenv, set_key, get_key
import socket
from core.config_manager.ui_adapter import UIConfigAdapter

from rich.console import Console
from ui.constants.provider import PROVIDER_SETUP
from core.analytics.tracker import UsageEvent, InteractionType, ErrorEvent, analytics_tracker

console = Console()
load_dotenv()
@@ -119,8 +121,19 @@ def run_test(prompt, selected_strategies, config):
adapter = UIConfigAdapter()
# adapter.update_config(config)
results = adapter.run_test(prompt, selected_strategies, config)
analytics_tracker.track(UsageEvent(
    name="test",
    interaction_type=InteractionType.DASHBOARD
))
return json.dumps(results), ""
except Exception as e:
analytics_tracker.track(ErrorEvent(
    name="test",
    interaction_type=InteractionType.DASHBOARD,
    error_msg=str(e)
))
return "", str(e)

def render_beautiful_json_output(json_output):