[WIP]: feat adding AI logger #390
base: main
Changes from all commits
65cb289
88c34ce
33cbe2b
0c7161a
docker-compose.yml
@@ -21,6 +21,15 @@ services:
      - REDIS_PASSWORD=${REDIS_PASSWORD}

Review comment: Changes made to a sensitive file. Solution: NA. Reason for comment: changes were made to docker-compose.yml, which needs review.

    ports:
      - "6379:6379"

  ollama:
    image: ollama/ollama:latest
    container_name: ollama
    volumes:
      - ollama:/root/.ollama
    ports:
      - 11434:11434
    restart: unless-stopped

secrets:
  github_app_pem:
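The new ollama service exposes the model server on port 11434. As a minimal smoke-test sketch (assuming the container above is reachable on localhost and that a model such as mistral has already been pulled with "ollama pull mistral"), the generate API can be exercised like this:

# Sketch only: assumes the ollama container from docker-compose.yml is up on
# localhost:11434 and a model has already been pulled (e.g. "ollama pull mistral").
import requests

resp = requests.post(
    "http://localhost:11434/api/generate",
    json={"model": "mistral", "prompt": "Say hello", "stream": False},
    timeout=60,
)
resp.raise_for_status()
print(resp.json()["response"])  # a single JSON object because streaming is disabled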
github_app/main.py
@@ -9,6 +9,7 @@
from github_app.github_helper.utils import is_github_signature_valid
from kaizen.utils.config import ConfigData
import logging
from kaizen.kaizenlog.service import analyze_logs

# from cloudcode.generator.ui import E2ETestGenerator
@@ -20,6 +21,17 @@
app = FastAPI()


@app.post("/analyze-logs")
async def analyze_logs_endpoint(request: Request):
    payload = await request.json()
    log_data = payload.get("log_data")
    if log_data:
        analysis_result = analyze_logs(log_data)
        return JSONResponse(content={"analysis": analysis_result})
    else:
        return JSONResponse(content={"error": "Missing log data"}, status_code=400)
Review comment on lines +24 to +32: The new 'analyze-logs' endpoint has been added to github_app/main.py, but the 'analyze_logs' function is not implemented. Solution: implement the 'analyze_logs' function in the 'kaizen/logger/analyzer.py' file; it should take the log data from the request, create a prompt for the Ollama server, send the prompt to the Ollama server, and return the analysis results. Reason for comment: the 'analyze-logs' endpoint calls 'analyze_logs', but this function is not defined in the provided code, which will cause the application to fail when the endpoint is accessed.

Suggested change
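The body of the suggested change is not captured in this view. As a rough sketch of what an analyze_logs entry point matching the import above (kaizen.kaizenlog.service) could look like, with the Ollama URL and model name as assumptions rather than anything stated in the PR:

# Hypothetical sketch, not the reviewer's actual suggestion (which is not shown here).
import requests

OLLAMA_URL = "http://ollama:11434/api/generate"  # assumed: the ollama service from docker-compose.yml


def analyze_logs(log_data: str):
    """Build a prompt from raw log text, send it to Ollama, and return the analysis text."""
    prompt = (
        "You are an AI assistant tasked with analyzing log data and identifying "
        f"potential issues or errors. Here is the log data:\n\n{log_data}\n\n"
        "Summarize any issues with severity, timestamp, and relevant details."
    )
    response = requests.post(
        OLLAMA_URL,
        json={"model": "mistral", "prompt": prompt, "stream": False},
        timeout=60,
    )
    response.raise_for_status()
    # With streaming disabled, Ollama returns one JSON object whose "response"
    # field holds the generated analysis text.
    return response.json().get("response")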
@app.post("/github-webhook")
async def handle_webhook(request: Request, background_tasks: BackgroundTasks):
    payload = await request.json()
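For reference, a client would call the new endpoint roughly like this (sketch; the host and port are assumptions about where the FastAPI app is served):

# Sketch: POST raw log text to the new /analyze-logs endpoint.
import requests

resp = requests.post(
    "http://localhost:8000/analyze-logs",  # assumed host/port
    json={"log_data": "2023-05-25 12:34:56 [ERROR] NullPointerException in UserService"},
)
print(resp.json())  # {"analysis": "..."} on success, {"error": "Missing log data"} with status 400 otherwise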
@@ -0,0 +1,69 @@
import requests
import json
import re


def create_prompt(log_data):
    prompt = f"You are an AI assistant tasked with analyzing log data and identifying potential issues or errors. Here is the log data:\n\n{log_data}\n\nPlease analyze the log data and provide a concise summary of any potential issues or errors detected, along with their severity (e.g., low, medium, high), timestamp, and any relevant details or context."
    return prompt
def analyze_logs(prompt, ollama_server_url):
    headers = {"Content-Type": "application/json"}
    # stream is disabled so Ollama returns one JSON object with a "response"
    # field instead of a stream of chunks.
    data = {"prompt": prompt, "model": "mistral", "stream": False}
    response = requests.post(ollama_server_url, headers=headers, data=json.dumps(data))

    if response.status_code == 200:
        model_response = response.json()["response"]
        return model_response
    else:
        print(f"Error: {response.status_code} - {response.text}")
        return None
def parse_response(response, log_data):
    lines = response.strip().split("\n")
    parsed_response = []
    log_lines = log_data.strip().split("\n")

    for line in lines:
        line = line.strip()
        if line.startswith("- Potential issue:"):
            issue = {"severity": None, "timestamp": None, "details": []}
            issue_parts = line.split(": ", 1)
            if len(issue_parts) > 1:
                issue["details"].append(issue_parts[1].strip())
            parsed_response.append(issue)
        elif line.startswith("Severity:"):
            severity = line.split(": ", 1)[1].strip().lower()
            parsed_response[-1]["severity"] = severity
        elif line.startswith("Timestamp:"):
            timestamp = line.split(": ", 1)[1].strip()
            parsed_response[-1]["timestamp"] = extract_timestamp(timestamp, log_lines)
        elif parsed_response and line:
            parsed_response[-1]["details"].append(line)

    return parsed_response
def extract_timestamp(timestamp_text, log_lines):
    for line in log_lines:
        if timestamp_text in line:
            match = re.search(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}', line)
            if match:
                return match.group()
    return None
def main():
    log_data = """
    2023-05-25 12:34:56 [ERROR] NullPointerException in com.example.app.service.UserService
    2023-05-25 12:35:12 [WARNING] Low disk space on /var/log (only 10% free)
    2023-05-25 12:36:08 [INFO] Application started successfully
    """

    prompt = create_prompt(log_data)
    # Placeholder URL; Ollama's generate endpoint normally lives at
    # http://<host>:11434/api/generate (see the ollama service in docker-compose.yml).
    ollama_server_url = "http://your-ollama-server.com/analyze"

    model_response = analyze_logs(prompt, ollama_server_url)
    if model_response:
        parsed_response = parse_response(model_response, log_data)
        print(json.dumps(parsed_response, indent=2))


if __name__ == "__main__":
    main()
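For context on the data structure, a model answer in the "- Potential issue:" / "Severity:" / "Timestamp:" format that parse_response scans for would be turned into a list of issue dicts, roughly as follows (illustrative values only):

# Illustrative only: the model text below follows the format parse_response expects.
sample_model_response = (
    "- Potential issue: NullPointerException in UserService\n"
    "Severity: High\n"
    "Timestamp: 2023-05-25 12:34:56"
)
# parse_response(sample_model_response, log_data) would return:
# [
#     {
#         "severity": "high",
#         "timestamp": "2023-05-25 12:34:56",
#         "details": ["NullPointerException in UserService"],
#     }
# ]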
@@ -0,0 +1,44 @@
import logging
import requests
import sys
import traceback
class KaizenLogHandler(logging.Handler):
    def __init__(self, service_url):
        super().__init__()
        self.service_url = service_url

    def emit(self, record):
        log_entry = self.format(record)
        try:
            # Send as JSON so the /analyze-logs endpoint can read it via request.json().
            response = requests.post(self.service_url, json={"log_data": log_entry})
            response.raise_for_status()
            analysis_result = response.json().get("analysis")
            if analysis_result:
                print(f"Potential issue detected: {analysis_result}")
        except requests.exceptions.RequestException as e:
            print(f"Error sending log to KaizenLog service: {e}")
def exception_handler(exc_type, exc_value, exc_traceback, service_url):
    exception_info = "".join(
        traceback.format_exception(exc_type, exc_value, exc_traceback)
    )
    try:
        # JSON body, matching what the analyze-logs endpoint expects.
        response = requests.post(service_url, json={"log_data": exception_info})
        response.raise_for_status()
        analysis_result = response.json().get("analysis")
        if analysis_result:
            print(f"Potential issue detected: {analysis_result}")
    except requests.exceptions.RequestException as e:
        print(f"Error sending log to KaizenLog service: {e}")
def init(service_url):
    handler = KaizenLogHandler(service_url)
    logger = logging.getLogger()
    logger.addHandler(handler)
    sys.excepthook = lambda exc_type, exc_value, exc_traceback: exception_handler(
        exc_type, exc_value, exc_traceback, service_url
    )
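As a usage sketch, an application would call init() once at startup and then log as usual; the service URL below is an assumption about where the FastAPI /analyze-logs endpoint is hosted:

# Usage sketch: wire the KaizenLog handler into the root logger at startup.
import logging

init("http://localhost:8000/analyze-logs")  # init() from the module above; URL is assumed

logging.getLogger(__name__).error("Payment service timed out after 30s")
# KaizenLogHandler.emit forwards the formatted record to the analyze-logs
# endpoint, and uncaught exceptions are routed through exception_handler as well.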