// Copyright 2024 Stacklok, Inc
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"context"
	"log"
	"os"
	"strings"
	"time"

	"github.com/stackloklabs/gollm/examples/tools/trusty"
	"github.com/stackloklabs/gollm/pkg/backend"
)

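// Defaults used when the BACKEND and MODEL environment variables are unset.
// The Ollama host assumes a local server listening on Ollama's default port.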
var (
	ollamaHost     = "http://localhost:11434"
	ollamaGenModel = "qwen2.5"
	openaiModel    = "gpt-4o-mini"
)

const (
	systemMessage = `
You are a security and software sustainability expert AI assistant. Your task is to help users by evaluating software packages based on their prompt and an optional JSON summary provided by an external tool.

- Your primary responsibility is to assess whether the package is **malicious**, **deprecated**, or **unsafe** based on the JSON input.
- **Do not print back** the JSON to the user. Only use relevant information from the JSON for your recommendation.
- If the package is **malicious**, **deprecated**, or **has a very low score**, recommend a **safer alternative** package and explain why it is a better choice.
- If the package is NEITHER **malicious**, **deprecated**, NOR **low-scoring**, recommend the package.
- Do not place emphasis on metrics like **stars** or **forks** unless they are specifically relevant to safety or sustainability concerns.

If the user does not specify an **ecosystem** (e.g., npm, pypi, crates, Maven, Go) or a **language**, politely ask the user for clarification.
- If the user asks about a Python package, assume pypi.
- If the user asks about a Java package, assume Maven.
- If the user asks about a JavaScript package, assume npm.
- If the user asks about a Rust package, assume crates.
If the user specifies neither a package manager nor a language, ask for clarification, but *do not* make assumptions about the language.

Your responses should be concise, clear, and helpful. Focus on security, safety, and active maintenance, in that order.
`
	summarizeMessage = `
Summarize the tool's analysis of the package in clear, plain language for the user.

- If the package is **malicious**, **deprecated**, or **no longer maintained**, provide a **bulleted list** of **two to three** safer alternative packages that serve the same purpose.
- If the package is **safe**, confirm the package as a **recommended option**.

Ensure the response is concise and easy to understand. Prioritize **clarity** and **helpfulness** in your explanation.
`
)

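// Example invocations (illustrative; assumes this example is run from its own
// module directory, and that an Ollama server is running locally when the
// Ollama backend is selected):
//
//	BACKEND=ollama go run . "Is the npm package express safe to use?"
//	BACKEND=openai OPENAI_API_KEY=... go run . "Evaluate the pypi package requests"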
func main() {
	var generationBackend backend.Backend

	beSelection := os.Getenv("BACKEND")
	if beSelection == "" {
		log.Println("No backend selected with the BACKEND env variable. Defaulting to Ollama.")
		beSelection = "ollama"
	}
	modelSelection := os.Getenv("MODEL")
	if modelSelection == "" {
		switch beSelection {
		case "ollama":
			modelSelection = ollamaGenModel
		case "openai":
			modelSelection = openaiModel
		}
		log.Println("No model selected with the MODEL env variable. Defaulting to", modelSelection)
	}

	switch beSelection {
	case "ollama":
		generationBackend = backend.NewOllamaBackend(ollamaHost, modelSelection)
		log.Println("Using Ollama backend:", modelSelection)
	case "openai":
		openaiKey := os.Getenv("OPENAI_API_KEY")
		if openaiKey == "" {
			log.Fatalf("OPENAI_API_KEY is required for OpenAI backend")
		}
		generationBackend = backend.NewOpenAIBackend(openaiKey, modelSelection)
		log.Println("Using OpenAI backend:", modelSelection)
	default:
		log.Fatalf("Unknown backend: %s", beSelection)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Second)
	defer cancel()

	userPrompt := os.Args[1:]
	if len(userPrompt) == 0 {
		log.Fatal("Please provide a prompt")
	}

	// Start the conversation: register the trusty tool so the model knows it
	// can call it, and add a system message to tune the output.
	convo := backend.NewConversation()
	convo.Tools.RegisterTool(trusty.Tool())
	convo.AddSystemMessage(systemMessage, nil)
	convo.AddUserMessage(strings.Join(userPrompt, " "), nil)

	// Generate the first response; the model may answer directly or request
	// a call to the registered tool.
	resp, err := generationBackend.Converse(ctx, convo)
	if err != nil {
		log.Fatalf("Error generating response: %v", err)
	}

	if len(resp.ToolCalls) == 0 {
		log.Println("No tool calls in response.")
		log.Println("Response:", convo.Messages[len(convo.Messages)-1]["content"])
		return
	}

	log.Println("Tool called")

	// The tool result is now part of the conversation; feed it back to the
	// model so it can make sense of the output before summarizing.
	_, err = generationBackend.Converse(ctx, convo)
	if err != nil {
		log.Fatalf("Error generating response: %v", err)
	}

	log.Println("Summarizing tool response")

	// Ask the model to summarize the tool response for the user.
	convo.AddSystemMessage(summarizeMessage, nil)
	_, err = generationBackend.Converse(ctx, convo)
	if err != nil {
		log.Fatalf("Error generating response: %v", err)
	}

	log.Println("Response:")
	log.Println(convo.Messages[len(convo.Messages)-1]["content"])
}