diff --git a/docker-compose.yml b/docker-compose.yml
index b0f5d4dc..7a5833d3 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -21,6 +21,16 @@ services:
       - REDIS_PASSWORD=${REDIS_PASSWORD}
     ports:
       - "6379:6379"
+
+  ollama:
+    image: ollama/ollama:latest
+    container_name: ollama
+    volumes:
+      # assumes a top-level "volumes: ollama:" entry declares this named volume
+      - ollama:/root/.ollama
+    ports:
+      - "11434:11434"
+    restart: unless-stopped
 
 secrets:
   github_app_pem:
diff --git a/github_app/main.py b/github_app/main.py
index e1848fa5..71d75c02 100644
--- a/github_app/main.py
+++ b/github_app/main.py
@@ -9,6 +9,8 @@
 from github_app.github_helper.utils import is_github_signature_valid
 from kaizen.utils.config import ConfigData
 import logging
+from fastapi.responses import JSONResponse
+from kaizen.logger.analyzer import create_prompt, analyze_logs
 
 # from cloudcode.generator.ui import E2ETestGenerator
 
@@ -20,6 +22,21 @@
 
 app = FastAPI()
 
+# Assumes the ollama service added in docker-compose.yml; override for other deployments.
+OLLAMA_SERVER_URL = "http://ollama:11434/api/generate"
+
+
+@app.post("/analyze-logs")
+async def analyze_logs_endpoint(request: Request):
+    payload = await request.json()
+    log_data = payload.get("log_data")
+    if not log_data:
+        return JSONResponse(content={"error": "Missing log data"}, status_code=400)
+    prompt = create_prompt(log_data)
+    analysis_result = analyze_logs(prompt, OLLAMA_SERVER_URL)
+    return JSONResponse(content={"analysis": analysis_result})
+
+
 @app.post("/github-webhook")
 async def handle_webhook(request: Request, background_tasks: BackgroundTasks):
     payload = await request.json()
diff --git a/kaizen/logger/analyzer.py b/kaizen/logger/analyzer.py
new file mode 100644
index 00000000..5f209f58
--- /dev/null
+++ b/kaizen/logger/analyzer.py
@@ -0,0 +1,88 @@
+import requests
+import json
+import re
+
+
+def create_prompt(log_data):
+    prompt = (
+        "You are an AI assistant tasked with analyzing log data and identifying "
+        "potential issues or errors. Here is the log data:\n\n"
+        f"{log_data}\n\n"
+        "Please analyze the log data and provide a concise summary of any "
+        "potential issues or errors detected, along with their severity "
+        "(e.g., low, medium, high), timestamp, and any relevant details or context."
+    )
+    return prompt
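+
+
+# Assumption: ollama_server_url points at Ollama's /api/generate endpoint
+# (e.g. http://ollama:11434/api/generate), which accepts {"model", "prompt",
+# "stream"} and, with "stream": false, returns a single JSON object whose
+# "response" field holds the model's full completion.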
+def analyze_logs(prompt, ollama_server_url):
+    headers = {"Content-Type": "application/json"}
+    data = {"prompt": prompt, "model": "mistral", "stream": False}
+    response = requests.post(ollama_server_url, headers=headers, data=json.dumps(data))
+
+    if response.status_code == 200:
+        return response.json()["response"]
+
+    print(f"Error: {response.status_code} - {response.text}")
+    return None
+
+
+def parse_response(response, log_data):
+    lines = response.strip().split("\n")
+    parsed_response = []
+    log_lines = log_data.strip().split("\n")
+
+    for line in lines:
+        line = line.strip()
+        if line.startswith("- Potential issue:"):
+            issue = {"severity": None, "timestamp": None, "details": []}
+            issue_parts = line.split(": ", 1)
+            if len(issue_parts) > 1:
+                issue["details"].append(issue_parts[1].strip())
+            parsed_response.append(issue)
+        elif not parsed_response:
+            # Skip preamble lines that appear before the first issue entry.
+            continue
+        elif line.startswith("Severity:"):
+            parsed_response[-1]["severity"] = line.split(": ", 1)[1].strip().lower()
+        elif line.startswith("Timestamp:"):
+            timestamp = line.split(": ", 1)[1].strip()
+            parsed_response[-1]["timestamp"] = extract_timestamp(timestamp, log_lines)
+        elif line:
+            parsed_response[-1]["details"].append(line)
+
+    return parsed_response
+
+
+def extract_timestamp(timestamp_text, log_lines):
+    for line in log_lines:
+        if timestamp_text in line:
+            match = re.search(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}", line)
+            if match:
+                return match.group()
+    return None
+
+
+def main():
+    log_data = """
+    2023-05-25 12:34:56 [ERROR] NullPointerException in com.example.app.service.UserService
+    2023-05-25 12:35:12 [WARNING] Low disk space on /var/log (only 10% free)
+    2023-05-25 12:36:08 [INFO] Application started successfully
+    """
+
+    prompt = create_prompt(log_data)
+    # Default endpoint exposed by the docker-compose ollama service; adjust as needed.
+    ollama_server_url = "http://localhost:11434/api/generate"
+
+    model_response = analyze_logs(prompt, ollama_server_url)
+    if model_response:
+        parsed_response = parse_response(model_response, log_data)
+        print(json.dumps(parsed_response, indent=2))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/kaizen/logger/kaizenlog.py b/kaizen/logger/kaizenlog.py
new file mode 100644
index 00000000..4102fafd
--- /dev/null
+++ b/kaizen/logger/kaizenlog.py
@@ -0,0 +1,55 @@
+import logging
+import requests
+import sys
+import traceback
+
+
+class KaizenLogHandler(logging.Handler):
+    def __init__(self, service_url):
+        super().__init__()
+        self.service_url = service_url
+
+    def emit(self, record):
+        log_entry = self.format(record)
+        try:
+            # The /analyze-logs endpoint reads a JSON body, so send json=, not form data.
+            response = requests.post(self.service_url, json={"log_data": log_entry})
+            response.raise_for_status()
+            analysis_result = response.json().get("analysis")
+            if analysis_result:
+                print(f"Potential issue detected: {analysis_result}")
+        except requests.exceptions.RequestException as e:
+            print(f"Error sending log to KaizenLog service: {e}")
+
+
+def exception_handler(exc_type, exc_value, exc_traceback, service_url):
+    exception_info = "".join(
+        traceback.format_exception(exc_type, exc_value, exc_traceback)
+    )
+    try:
+        response = requests.post(service_url, json={"log_data": exception_info})
+        response.raise_for_status()
+        analysis_result = response.json().get("analysis")
+        if analysis_result:
+            print(f"Potential issue detected: {analysis_result}")
+    except requests.exceptions.RequestException as e:
+        print(f"Error sending log to KaizenLog service: {e}")
+
+
+def init(service_url):
+    handler = KaizenLogHandler(service_url)
+    logger = logging.getLogger()
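+    # Attaching the handler to the root logger forwards every record the
+    # application emits; note that emit() posts synchronously, so each log
+    # call blocks until the KaizenLog service responds.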
+    logger.addHandler(handler)
+    sys.excepthook = lambda exc_type, exc_value, exc_traceback: exception_handler(
+        exc_type, exc_value, exc_traceback, service_url
+    )
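+
+
+if __name__ == "__main__":
+    # Minimal usage sketch. Assumes the FastAPI app from github_app/main.py
+    # is reachable at this URL; adjust host and port for your deployment.
+    init("http://localhost:8000/analyze-logs")
+    logging.getLogger(__name__).error("NullPointerException in UserService")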