Skip to content

Commit c03af19

Browse files
authored
Merge pull request #30 from openworm/fix-update-import
Update langchain import, other ollama fixes
2 parents ca21c50 + 2762ed6 commit c03af19

File tree

1 file changed

+20
-20
lines changed

1 file changed

+20
-20
lines changed

openworm_ai/utils/llms.py

Lines changed: 20 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
import time
22

3-
from langchain.prompts import PromptTemplate
3+
from langchain_core.prompts import PromptTemplate
44
from langchain_core.output_parsers import StrOutputParser
55
from langchain.chat_models import init_chat_model
66

@@ -26,39 +26,39 @@
2626
LLM_COHERE = "Cohere"
2727
LLM_CMD_LINE_ARGS["-co"] = LLM_COHERE
2828

29-
LLM_OLLAMA_LLAMA32 = "Ollama:llama3.2"
29+
LLM_OLLAMA_LLAMA32 = "ollama:llama3.2"
3030
LLM_CMD_LINE_ARGS["-o-l32"] = LLM_OLLAMA_LLAMA32
31-
LLM_OLLAMA_LLAMA32_1B = "Ollama:llama3.2:1b"
31+
LLM_OLLAMA_LLAMA32_1B = "ollama:llama3.2:1b"
3232
LLM_CMD_LINE_ARGS["-o-l321b"] = LLM_OLLAMA_LLAMA32_1B
3333

34-
LLM_OLLAMA_LLAMA32_3B = "Ollama:llama3.2:3b"
34+
LLM_OLLAMA_LLAMA32_3B = "ollama:llama3.2:3b"
3535
LLM_CMD_LINE_ARGS["-o-l323b"] = LLM_OLLAMA_LLAMA32_3B
3636

37-
LLM_OLLAMA_MISTRAL = "Ollama:mistral"
37+
LLM_OLLAMA_MISTRAL = "ollama:mistral"
3838
LLM_CMD_LINE_ARGS["-o-m"] = LLM_OLLAMA_MISTRAL
39-
LLM_OLLAMA_TINYLLAMA = "Ollama:tinyllama"
39+
LLM_OLLAMA_TINYLLAMA = "ollama:tinyllama"
4040
LLM_CMD_LINE_ARGS["-o-t"] = LLM_OLLAMA_TINYLLAMA
41-
LLM_OLLAMA_PHI3 = "Ollama:phi3:latest"
41+
LLM_OLLAMA_PHI3 = "ollama:phi3:latest"
4242
LLM_CMD_LINE_ARGS["-o-phi3"] = LLM_OLLAMA_PHI3
43-
LLM_OLLAMA_PHI4 = "Ollama:phi4:latest"
43+
LLM_OLLAMA_PHI4 = "ollama:phi4:latest"
4444
LLM_CMD_LINE_ARGS["-o-phi4"] = LLM_OLLAMA_PHI4
45-
LLM_OLLAMA_GEMMA = "Ollama:gemma:7b"
45+
LLM_OLLAMA_GEMMA = "ollama:gemma:7b"
4646
LLM_CMD_LINE_ARGS["-ge"] = LLM_OLLAMA_GEMMA
47-
LLM_OLLAMA_GEMMA2 = "Ollama:gemma2:latest"
47+
LLM_OLLAMA_GEMMA2 = "ollama:gemma2:latest"
4848
LLM_CMD_LINE_ARGS["-ge2"] = LLM_OLLAMA_GEMMA2
49-
LLM_OLLAMA_GEMMA3 = "Ollama:gemma3:4b"
49+
LLM_OLLAMA_GEMMA3 = "ollama:gemma3:4b"
5050
LLM_CMD_LINE_ARGS["-ge3"] = LLM_OLLAMA_GEMMA3
51-
LLM_OLLAMA_DEEPSEEK = "Ollama:deepseek-r1:7b"
51+
LLM_OLLAMA_DEEPSEEK = "ollama:deepseek-r1:7b"
5252
LLM_CMD_LINE_ARGS["-o-dsr1"] = LLM_OLLAMA_DEEPSEEK
5353

54-
LLM_OLLAMA_QWEN = "Ollama:qwen3:1.7b"
54+
LLM_OLLAMA_QWEN = "ollama:qwen3:1.7b"
5555
LLM_CMD_LINE_ARGS["-o-qw"] = LLM_OLLAMA_QWEN
5656

57-
LLM_OLLAMA_CODELLAMA = "Ollama:codellama:latest"
58-
LLM_OLLAMA_FALCON2 = "Ollama:falcon2:latest"
59-
LLM_OLLAMA_FALCON2 = "Ollama:falcon2:latest"
57+
LLM_OLLAMA_CODELLAMA = "ollama:codellama:latest"
58+
LLM_OLLAMA_FALCON2 = "ollama:falcon2:latest"
59+
LLM_OLLAMA_FALCON2 = "ollama:falcon2:latest"
6060

61-
LLM_OLLAMA_OLMO2_7B = "Ollama:olmo2:7b"
61+
LLM_OLLAMA_OLMO2_7B = "ollama:olmo2:7b"
6262
LLM_CMD_LINE_ARGS["-o-olmo27b"] = LLM_OLLAMA_OLMO2_7B
6363

6464
OPENAI_LLMS = [LLM_GPT35, LLM_GPT4, LLM_GPT4o]
@@ -108,7 +108,7 @@ def generate_response(input_text, llm_ver, temperature, only_celegans):
108108
prompt = PromptTemplate(template=template, input_variables=["question"])
109109

110110
try:
111-
llm = init_chat_model(llm_ver, temperature)
111+
llm = init_chat_model(llm_ver, temperature=temperature)
112112

113113
llm_chain = prompt | llm | StrOutputParser()
114114

@@ -127,7 +127,7 @@ def generate_panel_response(input_text, llm_panelists, llm_panel_chair, temperat
127127
template=GENERAL_QUERY_PROMPT_TEMPLATE, input_variables=["question"]
128128
)
129129

130-
llm = init_chat_model(llm_ver, temperature)
130+
llm = init_chat_model(llm_ver, temperature=temperature)
131131

132132
llm_chain = prompt | llm | StrOutputParser()
133133

@@ -156,7 +156,7 @@ def generate_panel_response(input_text, llm_panelists, llm_panel_chair, temperat
156156

157157
prompt = PromptTemplate(template=panel_chair_prompt, input_variables=["question"])
158158

159-
llm = init_chat_model(llm_panel_chair, temperature)
159+
llm = init_chat_model(llm_panel_chair, temperature=temperature)
160160

161161
llm_chain = prompt | llm | StrOutputParser()
162162

0 commit comments

Comments (0)