@@ -41,6 +41,7 @@ def test_ollama_llm_happy_path_deprecated_options(mock_import: Mock) -> None:
     mock_ollama.Client.return_value.chat.return_value = MagicMock(
         message=MagicMock(content="ollama chat response"),
     )
+    mock_ollama.Message.return_value = {"role": "user", "content": "test"}
     model = "gpt"
     model_params = {"temperature": 0.3}
     with pytest.warns(DeprecationWarning) as record:
@@ -59,11 +60,12 @@ def test_ollama_llm_happy_path_deprecated_options(mock_import: Mock) -> None:
     res = llm.invoke(question)
     assert isinstance(res, LLMResponse)
     assert res.content == "ollama chat response"
-    messages = [
-        {"role": "user", "content": question},
-    ]
     llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
-        model=model, messages=messages, options={"temperature": 0.3}
+        model=model,
+        messages=[
+            {"role": "user", "content": "test"}
+        ],
+        options={"temperature": 0.3}
     )


@@ -90,6 +92,7 @@ def test_ollama_llm_happy_path(mock_import: Mock) -> None:
     mock_ollama.Client.return_value.chat.return_value = MagicMock(
         message=MagicMock(content="ollama chat response"),
     )
+    mock_ollama.Message.return_value = {"role": "user", "content": "test"}
     model = "gpt"
     options = {"temperature": 0.3}
     model_params = {"options": options, "format": "json"}
@@ -102,7 +105,7 @@ def test_ollama_llm_happy_path(mock_import: Mock) -> None:
     assert isinstance(res, LLMResponse)
     assert res.content == "ollama chat response"
     messages = [
-        {"role": "user", "content": question},
+        {"role": "user", "content": "test"},
     ]
     llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
         model=model,
@@ -112,102 +115,6 @@ def test_ollama_llm_happy_path(mock_import: Mock) -> None:
     )


-@patch("builtins.__import__")
-def test_ollama_invoke_with_system_instruction_happy_path(mock_import: Mock) -> None:
-    mock_ollama = get_mock_ollama()
-    mock_import.return_value = mock_ollama
-    mock_ollama.Client.return_value.chat.return_value = MagicMock(
-        message=MagicMock(content="ollama chat response"),
-    )
-    model = "gpt"
-    options = {"temperature": 0.3}
-    model_params = {"options": options, "format": "json"}
-    llm = OllamaLLM(
-        model,
-        model_params=model_params,
-    )
-    system_instruction = "You are a helpful assistant."
-    question = "What about next season?"
-
-    response = llm.invoke(question, system_instruction=system_instruction)
-    assert response.content == "ollama chat response"
-    messages = [{"role": "system", "content": system_instruction}]
-    messages.append({"role": "user", "content": question})
-    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
-        model=model,
-        messages=messages,
-        options=options,
-        format="json",
-    )
-
-
-@patch("builtins.__import__")
-def test_ollama_invoke_with_message_history_happy_path(mock_import: Mock) -> None:
-    mock_ollama = get_mock_ollama()
-    mock_import.return_value = mock_ollama
-    mock_ollama.Client.return_value.chat.return_value = MagicMock(
-        message=MagicMock(content="ollama chat response"),
-    )
-    model = "gpt"
-    options = {"temperature": 0.3}
-    model_params = {"options": options}
-    llm = OllamaLLM(
-        model,
-        model_params=model_params,
-    )
-    message_history = [
-        {"role": "user", "content": "When does the sun come up in the summer?"},
-        {"role": "assistant", "content": "Usually around 6am."},
-    ]
-    question = "What about next season?"
-
-    response = llm.invoke(question, message_history)  # type: ignore
-    assert response.content == "ollama chat response"
-    messages = [m for m in message_history]
-    messages.append({"role": "user", "content": question})
-    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
-        model=model, messages=messages, options=options
-    )
-
-
-@patch("builtins.__import__")
-def test_ollama_invoke_with_message_history_and_system_instruction(
-    mock_import: Mock,
-) -> None:
-    mock_ollama = get_mock_ollama()
-    mock_import.return_value = mock_ollama
-    mock_ollama.Client.return_value.chat.return_value = MagicMock(
-        message=MagicMock(content="ollama chat response"),
-    )
-    model = "gpt"
-    options = {"temperature": 0.3}
-    model_params = {"options": options}
-    system_instruction = "You are a helpful assistant."
-    llm = OllamaLLM(
-        model,
-        model_params=model_params,
-    )
-    message_history = [
-        {"role": "user", "content": "When does the sun come up in the summer?"},
-        {"role": "assistant", "content": "Usually around 6am."},
-    ]
-    question = "What about next season?"
-
-    response = llm.invoke(
-        question,
-        message_history,  # type: ignore
-        system_instruction=system_instruction,
-    )
-    assert response.content == "ollama chat response"
-    messages = [{"role": "system", "content": system_instruction}]
-    messages.extend(message_history)
-    messages.append({"role": "user", "content": question})
-    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
-        model=model, messages=messages, options=options
-    )
-    assert llm.client.chat.call_count == 1  # type: ignore
-
-
 @patch("builtins.__import__")
 def test_ollama_invoke_with_message_history_validation_error(mock_import: Mock) -> None:
     mock_ollama = get_mock_ollama()
@@ -228,9 +135,8 @@ def test_ollama_invoke_with_message_history_validation_error(mock_import: Mock)
     ]
     question = "What about next season?"

-    with pytest.raises(LLMGenerationError) as exc_info:
+    with pytest.raises(LLMGenerationError, match="Input validation failed"):
         llm.invoke(question, message_history)  # type: ignore
-    assert "Input should be 'user', 'assistant' or 'system" in str(exc_info.value)


 @pytest.mark.asyncio