Skip to content

Commit 591ea48

Browse files
Merge pull request #60 from fiddlecube/feature/uifixes
working app dashboard, new graphs for compliance
2 parents 4d0eed5 + 919b8b0 commit 591ea48

File tree

5 files changed

+115
-52
lines changed

5 files changed

+115
-52
lines changed

core/config_manager/ui_adapter.py

Lines changed: 31 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,10 @@
55
from typing import Dict, List, Any, Optional
66
from .config import ConfigManager
77
from core.runner import execute_prompt_tests_with_orchestrator
8-
8+
from rich.console import Console
9+
from rich.progress import (
10+
Progress, SpinnerColumn, TextColumn, TimeElapsedColumn
11+
)
912
class UIConfigAdapter:
1013
"""Adapter for handling UI-specific configurations and test execution."""
1114

@@ -18,13 +21,14 @@ def __init__(self, config_manager: Optional[ConfigManager] = None):
1821
"""
1922
self.config_manager = config_manager or ConfigManager()
2023
self.default_config = {
21-
"provider_name": "openai", # Default provider
22-
"model": "gpt-4o", # Default model
24+
"provider": {"name": "openai/gpt-4o"}, # Default provider
25+
# "model": "gpt-4o", # Default model
2326
"temperature": 0.7, # Default temperature
2427
"max_tokens": 2000, # Default max tokens
2528
"timeout": 30, # Default timeout in seconds
2629
"prompt": {"content": "You are a helpful assistant"}, # Default prompt
27-
"strategies": [], # Default strategies
30+
"strategies": [], # Default strategies,
31+
"output_path": {"path": "reports", "filename": "report"}, # Default output path
2832
}
2933

3034
def run_test(self, prompt: str, strategies: List[str]) -> Dict[str, Any]:
@@ -51,17 +55,34 @@ def run_test(self, prompt: str, strategies: List[str]) -> Dict[str, Any]:
5155
"prompt": {"content": prompt},
5256
"strategies": strategies,
5357
"provider": {
54-
"name": self.default_config["provider_name"],
55-
"api_key": os.getenv(f"{self.default_config['provider_name'].upper()}_API_KEY", '')
58+
"name": self.default_config["provider"]["name"],
59+
"api_key": os.getenv(f"{self.default_config['provider']['name'].upper()}_API_KEY", '')
5660
},
57-
"model": self.default_config["model"],
61+
# "model": self.default_config["model"],
5862
"temperature": self.default_config["temperature"],
5963
"timeout": self.default_config["timeout"],
60-
"max_tokens": self.default_config["max_tokens"]
64+
"max_tokens": self.default_config["max_tokens"],
65+
"output_path": self.default_config["output_path"]
6166
}
67+
console = Console()
68+
console.print(f"[bold cyan]Running test with config: {test_config}[/]")
69+
70+
71+
with Progress(
72+
SpinnerColumn(),
73+
TextColumn("[bold blue]{task.description}"),
74+
TimeElapsedColumn(),
75+
) as progress:
76+
task = progress.add_task("[cyan]Testing prompt security", total=None)
77+
report_data = execute_prompt_tests_with_orchestrator(test_config)
78+
progress.update(task, completed=True)
79+
80+
console.print("[bold green]Tests completed successfully![/]")
81+
console.print(f"[bold cyan]Report saved successfully at {report_data['report_metadata']['path']}[/]")
82+
console.print("\n")
6283

6384
# Execute the test with orchestrator
64-
return execute_prompt_tests_with_orchestrator(test_config)
85+
return report_data
6586

6687
def update_config(self, config: Dict[str, Any]) -> None:
6788
"""
@@ -80,5 +101,5 @@ def get_config(self) -> Dict[str, Any]:
80101
"""Get the current configuration."""
81102
config = self.default_config.copy()
82103
# Convert provider_name back to provider for backward compatibility
83-
config["provider"] = config.pop("provider_name", "openai")
104+
config["provider"] = config.pop("provider", "openai")
84105
return config

ui/app.py

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -43,28 +43,28 @@ def create_dashboard():
4343
st.error("Unable to load report data")
4444
return
4545

46-
# Sidebar for risk configuration
47-
st.sidebar.header("Risk Configuration")
48-
risk_tolerance = st.sidebar.slider("Risk Tolerance", 0, 100, 30)
46+
# # Sidebar for risk configuration
47+
# st.sidebar.header("Risk Configuration")
48+
# risk_tolerance = st.sidebar.slider("Risk Tolerance", 0, 100, 30)
4949

5050
# Main dashboard sections
5151
col1, col2, col3 = st.columns(3)
5252

5353
with col1:
54-
st.metric("Total Security Tests", report_data['metadata']['test_count'],
54+
st.metric("Total Tests Ran", report_data['metadata']['test_count'],
5555
help="Number of security assessments performed")
5656
with col2:
57-
st.metric("Passed Tests", report_data['metadata']['success_count'],
57+
st.metric("Tests breached successfully", report_data['metadata']['success_count'],
5858
help="Tests that passed security checks")
5959
with col3:
60-
st.metric("Failed Tests", report_data['metadata']['failure_count'],
60+
st.metric("Tests unsuccessful", report_data['metadata']['failure_count'],
6161
help="Tests that failed security checks")
6262

6363
# Render dashboard components
64+
render_compliance_report(report_data)
6465
render_strategy_table(report_data)
65-
render_risk_severity(report_data)
66+
# render_risk_severity(report_data)
6667
render_security_findings(report_data)
67-
render_compliance_report(report_data)
6868

6969
def main():
7070
create_dashboard()

ui/components/compliance_report.py

Lines changed: 58 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import streamlit as st
22
import pandas as pd
3-
import plotly.express as px
3+
# import plotly.express as px
4+
import plotly.graph_objects as go
45

56
def render_compliance_report(report_data):
67
"""
@@ -40,21 +41,21 @@ def render_compliance_report(report_data):
4041
df = pd.DataFrame(compliance_data)
4142

4243
# Show compliance table
43-
st.subheader("Compliance Details")
44-
st.dataframe(
45-
df,
46-
column_config={
47-
'Strategy': st.column_config.TextColumn("Strategy"),
48-
'Numerical Score': st.column_config.NumberColumn("Risk Score"),
49-
'Qualitative Score': st.column_config.TextColumn("Qualitative Score"),
50-
'Likelihood': st.column_config.TextColumn("Likelihood"),
51-
'Impact': st.column_config.TextColumn("Impact"),
52-
'FIPS Impact': st.column_config.TextColumn("FIPS Impact"),
53-
'FIPS Version': st.column_config.TextColumn("FIPS Version"),
54-
'Test Success': st.column_config.CheckboxColumn("Test Failed")
55-
},
56-
hide_index=True
57-
)
44+
# st.subheader("Compliance Details")
45+
# st.dataframe(
46+
# df,
47+
# column_config={
48+
# 'Strategy': st.column_config.TextColumn("Strategy"),
49+
# 'Numerical Score': st.column_config.NumberColumn("Risk Score"),
50+
# 'Qualitative Score': st.column_config.TextColumn("Qualitative Score"),
51+
# 'Likelihood': st.column_config.TextColumn("Likelihood"),
52+
# 'Impact': st.column_config.TextColumn("Impact"),
53+
# 'FIPS Impact': st.column_config.TextColumn("FIPS Impact"),
54+
# 'FIPS Version': st.column_config.TextColumn("FIPS Version"),
55+
# 'Test Success': st.column_config.CheckboxColumn("Test Failed")
56+
# },
57+
# hide_index=True
58+
# )
5859

5960
# Show tested controls
6061
st.subheader("Tested Controls")
@@ -69,9 +70,47 @@ def render_compliance_report(report_data):
6970
'Control ID': control.get('control_id', 'Unknown'),
7071
'Title': control.get('title', 'Unknown'),
7172
'Description': control.get('description', 'Unknown'),
72-
'Version': control.get('version', 'Unknown')
73+
'Version': control.get('version', 'Unknown'),
74+
'Breach Successful': test.get('evaluation', {}).get('passed', False)
7375
})
7476

77+
# Create DataFrame for analysis
78+
df = pd.DataFrame(controls)
79+
80+
# Aggregate breaches by Control ID
81+
control_breaches = df.groupby('Control ID').agg({
82+
'Breach Successful': 'sum' # Count of successful breaches
83+
}).reset_index()
84+
85+
# Sort by number of breaches
86+
control_breaches = control_breaches.sort_values('Breach Successful', ascending=True)
87+
88+
# Create bar chart of breaches by control
89+
fig_control_breaches = go.Figure(data=[
90+
go.Bar(
91+
x=control_breaches['Control ID'],
92+
y=control_breaches['Breach Successful'],
93+
orientation='v', # Horizontal bars
94+
marker_color=[
95+
'red' if count >= 3 else
96+
'orange' if count >= 2 else
97+
'yellow' if count >= 1 else
98+
'green'
99+
for count in control_breaches['Breach Successful']
100+
]
101+
)
102+
])
103+
104+
fig_control_breaches.update_layout(
105+
title='NIST Controls Breaches',
106+
xaxis_title='Control ID',
107+
yaxis_title='Number of Successful Breaches',
108+
height=max(350, len(control_breaches) * 30) # Dynamic height based on number of controls
109+
)
110+
111+
# Display the plot
112+
st.plotly_chart(fig_control_breaches, use_container_width=True)
113+
75114
if controls:
76115
controls_df = pd.DataFrame(controls)
77116
st.dataframe(
@@ -82,7 +121,8 @@ def render_compliance_report(report_data):
82121
'Control ID': st.column_config.TextColumn("Control ID"),
83122
'Title': st.column_config.TextColumn("Control Title"),
84123
'Description': st.column_config.TextColumn("Description"),
85-
'Version': st.column_config.TextColumn("Version")
124+
'Version': st.column_config.TextColumn("Version"),
125+
'Breach Successful': st.column_config.TextColumn("Breach Successful")
86126
},
87127
hide_index=True
88128
)

ui/components/security_findings.py

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -18,9 +18,9 @@ def render_security_findings(report_data):
1818
for test in strategy['results']:
1919
findings_data.append({
2020
'Strategy': strategy_name,
21-
'Severity': test.get('severity', 'unknown'),
21+
'Severity': test.get('severity', 'Moderate'),
2222
'Category': test['category'],
23-
'Success': not test['success'], # True if test failed
23+
'Success': test.get('evaluation', {}).get('passed', False), # True if test failed
2424
'Mutation': test.get('mutation_technique', 'Unknown').replace('_', ' ').title(),
2525
'Description': test.get('description', 'No description'),
2626
'System Prompt': test.get('system_prompt', 'N/A'),
@@ -32,30 +32,30 @@ def render_security_findings(report_data):
3232
# Create DataFrame for analysis
3333
df = pd.DataFrame(findings_data)
3434

35-
# Strategy failure rates
36-
strategy_rates = df.groupby('Strategy').agg({
37-
'Success': 'mean',
38-
'Severity': lambda x: (x == 'high').mean()
35+
# Strategy to failures
36+
strategy_counts = df.groupby('Strategy').agg({
37+
'Success': 'sum', # Count of failures
38+
'Severity': lambda x: (x == 'moderate').sum() # Count of high severity
3939
}).reset_index()
4040

41-
# Bar chart of strategy failure rates
41+
# Bar chart of failure counts
4242
fig_strategy_rates = go.Figure(data=[
4343
go.Bar(
44-
x=strategy_rates['Strategy'],
45-
y=strategy_rates['Success'] * 100,
44+
x=strategy_counts['Strategy'],
45+
y=strategy_counts['Success'],
4646
marker_color=[
47-
'red' if rate > 0.5 else
48-
'orange' if rate > 0.3 else
49-
'yellow' if rate > 0.1 else
47+
'red' if count >= 5 else
48+
'orange' if count >= 3 else
49+
'yellow' if count >= 1 else
5050
'green'
51-
for rate in strategy_rates['Success']
51+
for count in strategy_counts['Success']
5252
]
5353
)
5454
])
5555
fig_strategy_rates.update_layout(
56-
title='Failure Rates by Attack Strategy',
56+
title='Breaches by Attack Strategy',
5757
xaxis_title='Attack Strategy',
58-
yaxis_title='Failure Rate (%)'
58+
yaxis_title='Breaches'
5959
)
6060

6161
# Layout for security findings

ui/dashboard.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,9 @@ def get_reports():
3838
"modified": mod_time.strftime("%Y-%m-%d %H:%M:%S"),
3939
"runtime": f"{runtime_minutes:.1f} min" if runtime_minutes >= 1 else f"{runtime_seconds:.1f} sec"
4040
})
41-
except:
41+
except Exception as e:
42+
print("Dashboard: Error processing file:", file)
43+
print("Dashboard: Error:", e)
4244
continue
4345
return sorted(reports, key=lambda x: x["modified"], reverse=True)
4446

0 commit comments

Comments
 (0)