Merged
@@ -60,7 +60,7 @@ def __init__(
 temperature: float | None = None,
 top_p: float | None = None,
 vector_store_id: str | None = None,
-metadata: dict[str, Any] | None = None,
+metadata: dict[str, str] | None = None,
 max_completion_tokens: int | None = None,
 max_prompt_tokens: int | None = None,
 parallel_tool_calls_enabled: bool | None = True,
@@ -193,7 +193,7 @@ async def create(
 temperature: float | None = None,
 top_p: float | None = None,
 vector_store_id: str | None = None,
-metadata: dict[str, Any] | None = None,
+metadata: dict[str, str] | None = None,
 max_completion_tokens: int | None = None,
 max_prompt_tokens: int | None = None,
 parallel_tool_calls_enabled: bool | None = True,
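Context for the annotation change above: this PR treats assistant metadata as string-valued (dict[str, str] rather than dict[str, Any]), so structured run options have to be serialized before they are attached to the assistant. A minimal write-side sketch under that assumption (build_assistant_metadata is an illustrative helper, not part of this change; only the __run_options key mirrors the diffs below):

import json

def build_assistant_metadata(run_options: dict[str, object]) -> dict[str, str]:
    # Metadata values must be plain strings, so the nested run options are
    # stored as a single JSON-encoded entry under a well-known key.
    return {"__run_options": json.dumps(run_options)}

metadata = build_assistant_metadata(
    {
        "max_completion_tokens": 100,
        "max_prompt_tokens": 50,
        "parallel_tool_calls_enabled": True,
        "truncation_message_count": 10,
    }
)
# metadata["__run_options"] is now one JSON string, satisfying dict[str, str].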
@@ -57,7 +57,7 @@ def __init__(
 temperature: float | None = None,
 top_p: float | None = None,
 vector_store_id: str | None = None,
-metadata: dict[str, Any] | None = None,
+metadata: dict[str, str] | None = None,
 max_completion_tokens: int | None = None,
 max_prompt_tokens: int | None = None,
 parallel_tool_calls_enabled: bool | None = True,
@@ -184,7 +184,7 @@ async def create(
 temperature: float | None = None,
 top_p: float | None = None,
 vector_store_id: str | None = None,
-metadata: dict[str, Any] | None = None,
+metadata: dict[str, str] | None = None,
 max_completion_tokens: int | None = None,
 max_prompt_tokens: int | None = None,
 parallel_tool_calls_enabled: bool | None = True,
@@ -121,7 +121,7 @@ def __init__(
 temperature: float | None = None,
 top_p: float | None = None,
 vector_store_id: str | None = None,
-metadata: dict[str, Any] | None = None,
+metadata: dict[str, str] | None = None,
 max_completion_tokens: int | None = None,
 max_prompt_tokens: int | None = None,
 parallel_tool_calls_enabled: bool | None = True,
@@ -368,10 +368,7 @@ def _create_open_ai_assistant_definition(cls, assistant: "Assistant") -> dict[st
 execution_settings = {}
 template_format = "semantic-kernel"
 if isinstance(assistant.metadata, dict) and OpenAIAssistantBase._options_metadata_key in assistant.metadata:
-settings_data = assistant.metadata[OpenAIAssistantBase._options_metadata_key]
-if isinstance(settings_data, str):
-settings_data = json.loads(settings_data)
-assistant.metadata[OpenAIAssistantBase._options_metadata_key] = settings_data
+settings_data = json.loads(assistant.metadata[OpenAIAssistantBase._options_metadata_key])
 execution_settings = {key: value for key, value in settings_data.items()}
 template_format = assistant.metadata.get(OpenAIAssistantBase._template_metadata_key, "semantic-kernel")

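With metadata values treated as strings everywhere, the reader no longer needs the removed dict-or-string branch: the added line assumes the __run_options entry is always a JSON string and decodes it unconditionally. A minimal round-trip sketch of that assumption (_OPTIONS_KEY stands in for OpenAIAssistantBase._options_metadata_key; the rest is illustrative):

import json

_OPTIONS_KEY = "__run_options"  # stands in for OpenAIAssistantBase._options_metadata_key

run_options = {"max_completion_tokens": 100, "max_prompt_tokens": 50}

# Write side: serialize once and store the options as a plain string value.
metadata: dict[str, str] = {_OPTIONS_KEY: json.dumps(run_options)}

# Read side: the definition builder can decode directly, no type check needed.
execution_settings = json.loads(metadata[_OPTIONS_KEY]) if _OPTIONS_KEY in metadata else {}
assert execution_settings == run_options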
17 changes: 9 additions & 8 deletions python/tests/unit/agents/test_azure_assistant_agent.py
@@ -1,5 +1,6 @@
 # Copyright (c) Microsoft. All rights reserved.

+import json
 from unittest.mock import AsyncMock, MagicMock, mock_open, patch

 import pytest
@@ -35,12 +36,12 @@ def mock_assistant():
 created_at=123456789,
 object="assistant",
 metadata={
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 model="test_model",
 description="test_description",
@@ -211,12 +212,12 @@ async def test_list_definitions(kernel: Kernel, mock_assistant, azure_openai_uni
 "top_p": 0.9,
 "vector_store_id": "vector_store1",
 "metadata": {
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
@@ -251,12 +252,12 @@ async def test_retrieve_agent(kernel, azure_openai_unit_test_env):
 "top_p": 0.9,
 "vector_store_id": "vector_store1",
 "metadata": {
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
@@ -300,12 +301,12 @@ async def test_retrieve_agent(kernel, azure_openai_unit_test_env):
 "top_p": 0.9,
 "vector_store_id": "vector_store1",
 "metadata": {
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
34 changes: 15 additions & 19 deletions python/tests/unit/agents/test_open_ai_assistant_agent.py
@@ -6,11 +6,7 @@
 import pytest
 from openai import AsyncOpenAI
 from openai.resources.beta.assistants import Assistant
-from openai.types.beta.assistant import (
-ToolResources,
-ToolResourcesCodeInterpreter,
-ToolResourcesFileSearch,
-)
+from openai.types.beta.assistant import ToolResources, ToolResourcesCodeInterpreter, ToolResourcesFileSearch
 from pydantic import ValidationError

 from semantic_kernel.agents.open_ai import OpenAIAssistantAgent
@@ -41,12 +37,12 @@ def mock_assistant():
 created_at=123456789,
 object="assistant",
 metadata={
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 model="test_model",
 description="test_description",
@@ -252,12 +248,12 @@ async def test_list_definitions(kernel: Kernel, openai_unit_test_env):
 description="test_description",
 instructions="test_instructions",
 metadata={
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 model="test_model",
 name="test_name",
@@ -303,12 +299,12 @@ async def test_list_definitions(kernel: Kernel, openai_unit_test_env):
 "top_p": 0.9,
 "vector_store_id": "vector_store1",
 "metadata": {
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
@@ -376,12 +372,12 @@ def test_create_open_ai_assistant_definition_with_json_metadata(mock_assistant_j
 "top_p": 0.9,
 "vector_store_id": "vector_store1",
 "metadata": {
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
@@ -404,12 +400,12 @@ def test_create_open_ai_assistant_definition_with_json_metadata(mock_assistant_j
 "top_p": 0.9,
 "vector_store_id": "vector_store1",
 "metadata": {
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
@@ -440,12 +436,12 @@ async def test_retrieve_agent(kernel, openai_unit_test_env):
 "top_p": 0.9,
 "vector_store_id": "vector_store1",
 "metadata": {
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
@@ -495,12 +491,12 @@ async def test_retrieve_agent(kernel, openai_unit_test_env):
 "top_p": 0.9,
 "vector_store_id": "vector_store1",
 "metadata": {
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
10 changes: 4 additions & 6 deletions python/tests/unit/agents/test_open_ai_assistant_base.py
@@ -1,5 +1,6 @@
 # Copyright (c) Microsoft. All rights reserved.

+import json
 from datetime import datetime, timedelta, timezone
 from typing import Any
 from unittest.mock import AsyncMock, MagicMock, mock_open, patch
@@ -43,10 +44,7 @@
 ToolCallDeltaObject,
 ToolCallsStepDetails,
 )
-from openai.types.beta.threads.runs.code_interpreter_tool_call import (
-CodeInterpreter,
-CodeInterpreterToolCall,
-)
+from openai.types.beta.threads.runs.code_interpreter_tool_call import CodeInterpreter, CodeInterpreterToolCall
 from openai.types.beta.threads.runs.code_interpreter_tool_call_delta import CodeInterpreter as CodeInterpreterDelta
 from openai.types.beta.threads.runs.code_interpreter_tool_call_delta import CodeInterpreterToolCallDelta
 from openai.types.beta.threads.runs.function_tool_call import Function as RunsFunction
@@ -118,12 +116,12 @@ def mock_assistant():
 created_at=123456789,
 object="assistant",
 metadata={
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 model="test_model",
 description="test_description",
5 changes: 3 additions & 2 deletions python/tests/unit/agents/test_open_ai_assistant_channel.py
@@ -1,4 +1,5 @@
 # Copyright (c) Microsoft. All rights reserved.
+import json
 from typing import Any
 from unittest.mock import AsyncMock, MagicMock, patch

@@ -74,12 +75,12 @@ def mock_assistant():
 created_at=123456789,
 object="assistant",
 metadata={
-"__run_options": {
+"__run_options": json.dumps({
 "max_completion_tokens": 100,
 "max_prompt_tokens": 50,
 "parallel_tool_calls_enabled": True,
 "truncation_message_count": 10,
-}
+})
 },
 model="test_model",
 description="test_description",