3 changes: 3 additions & 0 deletions README.md
@@ -90,3 +90,6 @@ Some examples require extra dependencies. See each sample's directory for specif
To run the tests:

uv run poe test

Note that if an API key is not found, the `openai_agents` tests skip the variants that make real OpenAI API calls and run only against mocked models.
To run with real model calls, set `OPENAI_API_KEY` in your environment.
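For example, assuming a POSIX shell (`your-api-key` is a placeholder for a real key):

export OPENAI_API_KEY=your-api-key
uv run poe test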
54 changes: 54 additions & 0 deletions tests/openai_agents/basic/test_agent_lifecycle_workflow.py
@@ -0,0 +1,54 @@
import os
import uuid
from concurrent.futures import ThreadPoolExecutor

import pytest
from temporalio.client import Client
from temporalio.contrib.openai_agents.testing import (
AgentEnvironment,
ResponseBuilders,
TestModel,
)
from temporalio.worker import Worker

from openai_agents.basic.workflows.agent_lifecycle_workflow import (
AgentLifecycleWorkflow,
)


def agent_lifecycle_test_model():
return TestModel.returning_responses(
[ResponseBuilders.output_message('{"number": 10}')]
)


@pytest.mark.parametrize("mock_model", [True, False])
async def test_execute_workflow(client: Client, mock_model: bool):
task_queue_name = str(uuid.uuid4())
if not mock_model and not os.environ.get("OPENAI_API_KEY"):
pytest.skip(
f"Skipping test (mock_model={mock_model}), because OPENAI_API_KEY is not set"
)

async with AgentEnvironment(
model=agent_lifecycle_test_model() if mock_model else None
) as agent_env:
client = agent_env.applied_on_client(client)
async with Worker(
client,
task_queue=task_queue_name,
workflows=[AgentLifecycleWorkflow],
activity_executor=ThreadPoolExecutor(5),
):
result = await client.execute_workflow(
AgentLifecycleWorkflow.run,
10, # max_number parameter
id=str(uuid.uuid4()),
task_queue=task_queue_name,
)

# Verify the result has the expected structure
assert isinstance(result.number, int)
assert (
0 <= result.number <= 20
) # Should be between 0 and max*2 due to multiply operation
87 changes: 87 additions & 0 deletions tests/openai_agents/basic/test_dynamic_system_prompt_workflow.py
@@ -0,0 +1,87 @@
import os
import uuid
from concurrent.futures import ThreadPoolExecutor

import pytest
from temporalio.client import Client
from temporalio.contrib.openai_agents.testing import (
AgentEnvironment,
ResponseBuilders,
TestModel,
)
from temporalio.worker import Worker

from openai_agents.basic.workflows.dynamic_system_prompt_workflow import (
DynamicSystemPromptWorkflow,
)


def dynamic_system_prompt_test_model():
return TestModel.returning_responses(
[
ResponseBuilders.output_message(
"Style: haiku\nResponse: The weather is cloudy with a chance of meatballs."
)
]
)


@pytest.mark.parametrize("mock_model", [True, False])
async def test_execute_workflow_with_random_style(client: Client, mock_model: bool):
task_queue_name = str(uuid.uuid4())
if not mock_model and not os.environ.get("OPENAI_API_KEY"):
pytest.skip(
f"Skipping test (mock_model={mock_model}), because OPENAI_API_KEY is not set"
)

async with AgentEnvironment(
model=dynamic_system_prompt_test_model() if mock_model else None
) as agent_env:
client = agent_env.applied_on_client(client)
async with Worker(
client,
task_queue=task_queue_name,
workflows=[DynamicSystemPromptWorkflow],
activity_executor=ThreadPoolExecutor(5),
):
result = await client.execute_workflow(
DynamicSystemPromptWorkflow.run,
"Tell me about the weather today.",
id=str(uuid.uuid4()),
task_queue=task_queue_name,
)

# Verify the result has the expected format
assert "Style:" in result
assert "Response:" in result
assert any(style in result for style in ["haiku", "pirate", "robot"])


@pytest.mark.parametrize("mock_model", [True, False])
async def test_execute_workflow_with_specific_style(client: Client, mock_model: bool):
task_queue_name = str(uuid.uuid4())
if not mock_model and not os.environ.get("OPENAI_API_KEY"):
pytest.skip(
f"Skipping test (mock_model={mock_model}), because OPENAI_API_KEY is not set"
)

async with AgentEnvironment(
model=dynamic_system_prompt_test_model() if mock_model else None
) as agent_env:
client = agent_env.applied_on_client(client)
async with Worker(
client,
task_queue=task_queue_name,
workflows=[DynamicSystemPromptWorkflow],
activity_executor=ThreadPoolExecutor(5),
):
result = await client.execute_workflow(
DynamicSystemPromptWorkflow.run,
args=["Tell me about the weather today.", "haiku"],
id=str(uuid.uuid4()),
task_queue=task_queue_name,
)

# Verify the result has the expected format and style
assert "Style: haiku" in result
assert "Response:" in result
52 changes: 52 additions & 0 deletions tests/openai_agents/basic/test_hello_world_workflow.py
@@ -0,0 +1,52 @@
import os
import uuid
from concurrent.futures import ThreadPoolExecutor

import pytest
from temporalio.client import Client
from temporalio.contrib.openai_agents.testing import (
AgentEnvironment,
ResponseBuilders,
TestModel,
)
from temporalio.worker import Worker

from openai_agents.basic.workflows.hello_world_workflow import HelloWorldAgent


def hello_world_test_model():
return TestModel.returning_responses(
[ResponseBuilders.output_message("This is a haiku (not really)")]
)


@pytest.mark.parametrize("mock_model", [True, False])
async def test_execute_workflow(client: Client, mock_model: bool):
task_queue_name = str(uuid.uuid4())
if not mock_model and not os.environ.get("OPENAI_API_KEY"):
pytest.skip(
f"Skipping test (mock_model={mock_model}), because OPENAI_API_KEY is not set"
)

async with AgentEnvironment(
model=hello_world_test_model() if mock_model else None
) as agent_env:
client = agent_env.applied_on_client(client)
async with Worker(
client,
task_queue=task_queue_name,
workflows=[HelloWorldAgent],
activity_executor=ThreadPoolExecutor(5),
):
result = await client.execute_workflow(
HelloWorldAgent.run,
"Write a recursive haiku about recursive haikus.",
id=str(uuid.uuid4()),
task_queue=task_queue_name,
)

if mock_model:
assert result == "This is a haiku (not really)"
else:
assert isinstance(result, str)
assert len(result) > 0
52 changes: 52 additions & 0 deletions tests/openai_agents/basic/test_lifecycle_workflow.py
@@ -0,0 +1,52 @@
import os
import uuid
from concurrent.futures import ThreadPoolExecutor

import pytest
from temporalio.client import Client
from temporalio.contrib.openai_agents.testing import (
AgentEnvironment,
ResponseBuilders,
TestModel,
)
from temporalio.worker import Worker

from openai_agents.basic.workflows.lifecycle_workflow import LifecycleWorkflow


def lifecycle_test_model():
return TestModel.returning_responses(
[ResponseBuilders.output_message('{"number": 10}')]
)


@pytest.mark.parametrize("mock_model", [True, False])
async def test_execute_workflow(client: Client, mock_model: bool):
task_queue_name = str(uuid.uuid4())
if not mock_model and not os.environ.get("OPENAI_API_KEY"):
pytest.skip(
f"Skipping test (mock_model={mock_model}), because OPENAI_API_KEY is not set"
)

async with AgentEnvironment(
model=lifecycle_test_model() if mock_model else None
) as agent_env:
client = agent_env.applied_on_client(client)
async with Worker(
client,
task_queue=task_queue_name,
workflows=[LifecycleWorkflow],
activity_executor=ThreadPoolExecutor(5),
):
result = await client.execute_workflow(
LifecycleWorkflow.run,
10, # max_number parameter
id=str(uuid.uuid4()),
task_queue=task_queue_name,
)

# Verify the result has the expected structure
assert isinstance(result.number, int)
assert (
0 <= result.number <= 20
) # Should be between 0 and max*2 due to multiply operation
84 changes: 84 additions & 0 deletions tests/openai_agents/basic/test_local_image_workflow.py
@@ -0,0 +1,84 @@
import os
import uuid
from concurrent.futures import ThreadPoolExecutor

import pytest
from temporalio.client import Client
from temporalio.contrib.openai_agents.testing import (
AgentEnvironment,
ResponseBuilders,
TestModel,
)
from temporalio.worker import Worker

from openai_agents.basic.activities.image_activities import read_image_as_base64
from openai_agents.basic.workflows.local_image_workflow import LocalImageWorkflow


def local_image_test_model():
return TestModel.returning_responses(
[ResponseBuilders.output_message("I can see a bison in the image.")]
)


@pytest.mark.parametrize("mock_model", [True, False])
async def test_execute_workflow_default_question(client: Client, mock_model: bool):
task_queue_name = str(uuid.uuid4())
if not mock_model and not os.environ.get("OPENAI_API_KEY"):
pytest.skip(
f"Skipping test (mock_model={mock_model}), because OPENAI_API_KEY is not set"
)

async with AgentEnvironment(
model=local_image_test_model() if mock_model else None
) as agent_env:
client = agent_env.applied_on_client(client)
async with Worker(
client,
task_queue=task_queue_name,
workflows=[LocalImageWorkflow],
activity_executor=ThreadPoolExecutor(5),
activities=[read_image_as_base64],
):
result = await client.execute_workflow(
LocalImageWorkflow.run,
"openai_agents/basic/media/image_bison.jpg", # Path to test image
id=str(uuid.uuid4()),
task_queue=task_queue_name,
)

# Verify the result is a string response
assert isinstance(result, str)
assert len(result) > 0


@pytest.mark.parametrize("mock_model", [True, False])
async def test_execute_workflow_custom_question(client: Client, mock_model: bool):
task_queue_name = str(uuid.uuid4())
if not mock_model and not os.environ.get("OPENAI_API_KEY"):
pytest.skip(
f"Skipping test (mock_model={mock_model}), because OPENAI_API_KEY is not set"
)

async with AgentEnvironment(
model=local_image_test_model() if mock_model else None
) as agent_env:
client = agent_env.applied_on_client(client)
async with Worker(
client,
task_queue=task_queue_name,
workflows=[LocalImageWorkflow],
activity_executor=ThreadPoolExecutor(5),
activities=[read_image_as_base64],
):
custom_question = "What animals do you see in this image?"
result = await client.execute_workflow(
LocalImageWorkflow.run,
args=["openai_agents/basic/media/image_bison.jpg", custom_question],
id=str(uuid.uuid4()),
task_queue=task_queue_name,
)

# Verify the result is a string response
assert isinstance(result, str)
assert len(result) > 0