Merged
Changes from 9 commits
1 change: 1 addition & 0 deletions README.md
@@ -90,6 +90,7 @@ If you want help feel free to reach out to us at:

![](docs/gif/demo.gif)


## Roadmap

- [ ] Full Application Pen Testing
34 changes: 28 additions & 6 deletions cli/commands.py
@@ -14,10 +14,11 @@
from rich.progress import (
    Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
)
from typing import Dict, Any, Optional, Callable
from typing import Dict, Any, Callable
from rich import box
from core.runner import execute_prompt_tests, execute_rerun_test
from core.config_manager.cli_adapter import CLIConfigAdapter
from core.analytics.tracker import analytics_tracker


def dict_to_cli_table(
@@ -120,6 +121,7 @@ def test(config_path, prompt, strategy, provider, output, report, parallel, verb
    cli_adapter = CLIConfigAdapter()

    try:
        analytics_tracker.track_cli_command("test")
        # Load configuration from CLI arguments
        cli_adapter.load_from_cli(
            config_path=config_path,
@@ -159,23 +161,33 @@ def test(config_path, prompt, strategy, provider, output, report, parallel, verb
        runner_config['nist_compliance'] = True

    except FileNotFoundError as e:
        analytics_tracker.track_error("FileNotFound", f"Configuration file not found: {e}")
        click.echo(str(e), err=True)
        sys.exit(1)
    except Exception as e:
        analytics_tracker.track_error("ConfigProcessing", f"Error processing configuration: {e}")
        click.echo(f"Error processing configuration: {e}", err=True)
        sys.exit(1)

    # Run the tests with a progress indicator
    console.print("\nRunning tests...")

    with Progress(
        SpinnerColumn(),
        TextColumn("[bold blue]{task.description}"),
        TimeElapsedColumn(),
    ) as progress:
        task = progress.add_task("[cyan]Testing prompt security", total=None)
        start_time = analytics_tracker.track_test_start(
            strategy or "manual",
            provider or "unknown"
        )
        report_data = execute_prompt_tests(config_dict=runner_config)
        progress.update(task, completed=True)

    # Track test completion
    analytics_tracker.track_test_end(start_time, report_data['metadata'].get('success_count', 0) > 0)

    console.print("[bold green]Tests completed successfully![/]")
    console.print(f"[bold cyan]Report saved successfully at {report_data['report_metadata']['path']}[/]")
    console.print("\n")
@@ -239,6 +251,8 @@ def test(config_path, prompt, strategy, provider, output, report, parallel, verb
def report(report_file, format, summary):
"""Analyze and view previous test results. If no file is specified, uses the latest report."""
try:
analytics_tracker.track_cli_command("report")
analytics_tracker.track_report_view(format, summary)
# If no report file is specified, find the latest one
if not report_file:
report_dir = "reports"
@@ -298,11 +312,13 @@ def report(report_file, format, summary):
f.write("</body></html>")
click.echo(f"HTML report saved to {html_path}")

except FileNotFoundError:
click.echo(f"Report file not found: {report_file}", err=True)
except FileNotFoundError as e:
analytics_tracker.track_error("FileNotFound", f"Report file not found: {e}")
click.echo(f"Error: Report file not found: {e}", err=True)
sys.exit(1)
except json.JSONDecodeError:
click.echo(f"Invalid JSON in report file: {report_file}", err=True)
analytics_tracker.track_error("InvalidJSON", "Invalid JSON format in report file")
click.echo("Error: Invalid JSON format in report file.", err=True)
sys.exit(1)


@@ -312,6 +328,8 @@ def report(report_file, format, summary):
@click.option('--output', '-o', help='Output file path')
def generate(type, template, output):
"""Generate configuration files or sample prompts."""
analytics_tracker.track_cli_command("generate")
analytics_tracker.track_template_use(type, template or "default")
if type == 'config':
# Define some templates
templates = {
@@ -398,7 +416,7 @@ def generate(type, template, output):
@click.option('--validate', '-v', help='Validate a configuration file')
def config(list, show, validate):
"""Manage configuration files."""

analytics_tracker.track_cli_command("config")
if list:
click.echo("Available configurations:")
# Get the list of config directories from core.config
@@ -429,6 +447,7 @@ def config(list, show, validate):
            # Output the raw configuration
            click.echo(yaml.dump(cli_adapter.config_manager.config, default_flow_style=False)) # noqa: E501
        except FileNotFoundError as e:
            analytics_tracker.track_error("FileNotFound", f"Configuration file not found: {e}")
            click.echo(str(e), err=True)
            sys.exit(1)

@@ -447,9 +466,11 @@ def config(list, show, validate):
click.echo("Processed configuration for runner:")
click.echo(yaml.dump(runner_config, default_flow_style=False))
except FileNotFoundError as e:
click.echo(str(e), err=True)
analytics_tracker.track_error("FileNotFound", f"Configuration file not found: {e}")
click.echo(f"Error: Configuration file not found: {e}", err=True)
sys.exit(1)
except Exception as e:
analytics_tracker.track_error("ConfigValidation", f"Configuration validation failed: {e}")
click.echo(f"Configuration validation failed: {e}", err=True)
sys.exit(1)

@@ -471,6 +492,7 @@ def rerun(prompt, report_file):
"""
# Create a rich console for showing output
console = Console()
analytics_tracker.track_cli_command("rerun")
# Validate inputs
if not prompt:
console.print("[red]Error: System prompt is required.[/red]")
3 changes: 3 additions & 0 deletions cli/main.py
@@ -7,6 +7,7 @@
import click
import importlib.metadata
from cli.commands import cli, test, report, config, generate, rerun
from core.analytics.tracker import analytics_tracker


def main():
@@ -63,6 +64,8 @@ def dashboard():
    import sys
    import os

    analytics_tracker.track_cli_command("dashboard")

    # Get the absolute path to the app.py file
    app_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'ui', 'dashboard.py')

108 changes: 108 additions & 0 deletions core/analytics/tracker.py
@@ -0,0 +1,108 @@
import os
import time
from opentelemetry.sdk.resources import SERVICE_NAME, Resource
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.metrics import set_meter_provider, get_meter_provider
from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader, ConsoleMetricExporter
from azure.monitor.opentelemetry.exporter import AzureMonitorMetricExporter

# Define the resource attributes for metrics
resource = Resource.create({
    SERVICE_NAME: "compliant-llm",
    "service.version": "1.0.0",
    "environment": os.getenv("ENVIRONMENT", "prod")
})

# Configure Azure Monitor connection
try:
    # Get Azure Monitor configuration from environment variables
    instrumentation_key = os.getenv("AZURE_INSTRUMENTATION_KEY", "ed532436-db1f-46bb-aeef-17cb4f3dcf8b")
    ingestion_endpoint = os.getenv("AZURE_INGESTION_ENDPOINT", "https://westus-0.in.applicationinsights.azure.com/")

    # Create Azure Monitor exporter
    exporter = AzureMonitorMetricExporter(
        connection_string=(
            f"InstrumentationKey={instrumentation_key};"
            f"IngestionEndpoint={ingestion_endpoint}"
        )
    )
    print("✅ Azure Monitor exporter successfully initialized.")
except Exception as e:
    print(f"❌ Failed to initialize Azure Monitor exporter: {e}")
    print("❌ Falling back to console exporter only.")
    exporter = None

# Set up the metric readers: include Azure Monitor when the exporter initialized,
# otherwise fall back to console-only export so the module still loads.
if exporter:
    metric_readers = [
        # Local debugging
        PeriodicExportingMetricReader(ConsoleMetricExporter()),
        # Azure Monitor Exporter (always try to use Azure unless explicitly disabled)
        PeriodicExportingMetricReader(exporter)
    ]
else:
    metric_readers = [PeriodicExportingMetricReader(ConsoleMetricExporter())]

meter_provider = MeterProvider(
    resource=resource,
    metric_readers=metric_readers
)

set_meter_provider(meter_provider)
meter = get_meter_provider().get_meter("compliant-llm")

class AnalyticsTracker:
"""Class for tracking analytics and metrics with Azure Monitor via OpenTelemetry."""

def __init__(self):
self.total_tests = meter.create_counter("compliant_llm.total_tests", description="Total number of tests executed", unit="1")
self.success_count = meter.create_counter("compliant_llm.success_count", description="Number of successful attacks", unit="1")
self.test_duration = meter.create_histogram("compliant_llm.test_duration", description="Duration of each test in seconds", unit="s")
self.strategy_usage = meter.create_counter("compliant_llm.strategy_usage", description="Usage count per attack strategy", unit="1")
self.provider_usage = meter.create_counter("compliant_llm.provider_usage", description="Usage count per LLM provider", unit="1")
self.prompt_length_hist = meter.create_histogram("compliant_llm.prompt_length", description="Length of prompts in characters", unit="chars")
self.api_response_time = meter.create_histogram("compliant_llm.api_response_time", description="API response time in seconds", unit="s")
self.errors = meter.create_counter("compliant_llm.errors", description="Number of errors encountered", unit="1")
self.cli_command_usage = meter.create_counter("compliant_llm.cli_command_usage", description="Count of CLI commands executed", unit="1")
self.template_usage = meter.create_counter("compliant_llm.template_usage", description="Usage count per config/prompt template", unit="1")
self.report_view_usage = meter.create_counter("compliant_llm.report_view_usage", description="Report view interactions", unit="1")
self.config_validation = meter.create_counter("compliant_llm.config_validation", description="Configuration validation result", unit="1")

def track_test_start(self, strategy_name: str, provider_name: str) -> float:
self.total_tests.add(1)
self.strategy_usage.add(1, {"strategy": strategy_name})
self.provider_usage.add(1, {"provider": provider_name})
return time.time()

def track_test_end(self, start_time: float, success: bool):
duration = time.time() - start_time
self.test_duration.record(duration)
if success:
self.success_count.add(1)

def track_api_response(self, response_time: float, provider: str):
self.api_response_time.record(response_time, {"provider": provider})

def track_error(self, error_type: str, error_message: str):
self.errors.add(1, {"type": error_type, "message": error_message[:100]})

def track_cli_command(self, command_name: str):
self.cli_command_usage.add(1, {"command": command_name})

def track_template_use(self, template_type: str, template_name: str):
self.template_usage.add(1, {
"type": template_type,
"template": template_name
})

def track_report_view(self, format: str, summary: bool):
self.report_view_usage.add(1, {
"format": format,
"summary": str(summary)
})

def track_config_validation(self, result: str):
self.config_validation.add(1, {"result": result})

# Initialize global tracker
analytics_tracker = AnalyticsTracker()
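
A minimal usage sketch of the new tracker, assuming the environment variables are exported before the module is imported (the exporter and meter provider are built at import time); the key/endpoint values and the "prompt_injection"/"openai" labels below are placeholders, not real credentials or required names:

# Sketch only: point the exporter at your own Application Insights resource.
import os

os.environ["AZURE_INSTRUMENTATION_KEY"] = "<your-instrumentation-key>"  # placeholder
os.environ["AZURE_INGESTION_ENDPOINT"] = "https://<region>.in.applicationinsights.azure.com/"  # placeholder
os.environ["ENVIRONMENT"] = "dev"

# Import after configuring the environment, because tracker.py reads these
# variables and wires up the meter provider as a module-level side effect.
from core.analytics.tracker import analytics_tracker

# Record one test run end-to-end (strategy/provider labels are illustrative).
start = analytics_tracker.track_test_start("prompt_injection", "openai")
# ... run the actual test here ...
analytics_tracker.track_test_end(start, success=True)

# The CLI commands in this PR reuse the same global instance, e.g.:
analytics_tracker.track_cli_command("test")
analytics_tracker.track_error("FileNotFound", "example: config.yaml missing")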
Binary file added docs/images/controls_tested.png
Binary file added docs/images/nist.png
10 changes: 9 additions & 1 deletion pyproject.toml
@@ -23,7 +23,15 @@ dependencies = [
"aiofiles",
"plotly",
"psutil",
"markdown"
"markdown",
"opentelemetry-api",
"opentelemetry-sdk",
"opentelemetry-exporter-otlp",
"opentelemetry-instrumentation",
"azure-monitor-opentelemetry-exporter",
"azure-core",
"azure-identity",
"azure-monitor-opentelemetry"
]

[project.scripts]
8 changes: 8 additions & 0 deletions requirements-lock.txt
@@ -83,3 +83,11 @@ tzdata==2025.2
urllib3==2.4.0
yarl==1.20.0
zipp==3.21.0
opentelemetry-api
opentelemetry-sdk
opentelemetry-exporter-otlp
opentelemetry-instrumentation
azure-monitor-opentelemetry-exporter
azure-core
azure-identity
azure-monitor-opentelemetry
10 changes: 9 additions & 1 deletion requirements.txt
@@ -8,4 +8,12 @@ pytest
aiofiles
plotly
psutil
markdown
markdown
opentelemetry-api
opentelemetry-sdk
opentelemetry-exporter-otlp
opentelemetry-instrumentation
azure-monitor-opentelemetry-exporter
azure-core
azure-identity
azure-monitor-opentelemetry