 
 from __future__ import annotations
 
+import asyncio
 import logging
 from enum import Enum
 from functools import cache
 from typing import Any
 
-from openai import AsyncOpenAI, NotFoundError
+from openai import AsyncOpenAI, NotFoundError, OpenAI
 from pydantic import BaseModel, ConfigDict, Field
 
+try:
+    from openai import AsyncAzureOpenAI, AzureOpenAI  # type: ignore
+except Exception:  # pragma: no cover - optional dependency
+    AsyncAzureOpenAI = object  # type: ignore
+    AzureOpenAI = object  # type: ignore
+
 from guardrails.registry import default_spec_registry
 from guardrails.spec import GuardrailSpecMetadata
 from guardrails.types import GuardrailResult
@@ -130,11 +137,14 @@ def _get_moderation_client() -> AsyncOpenAI:
     return AsyncOpenAI()
 
 
-async def _call_moderation_api(client: AsyncOpenAI, data: str) -> Any:
-    """Call the OpenAI moderation API.
+async def _call_moderation_api_async(
+    client: AsyncOpenAI | AsyncAzureOpenAI,
+    data: str,  # type: ignore
+) -> Any:
+    """Call the OpenAI moderation API asynchronously.
 
     Args:
-        client: The OpenAI client to use.
+        client: The async OpenAI or Azure OpenAI client to use.
         data: The text to analyze.
 
     Returns:
@@ -146,6 +156,22 @@ async def _call_moderation_api(client: AsyncOpenAI, data: str) -> Any:
     )
 
 
+def _call_moderation_api_sync(client: OpenAI | AzureOpenAI, data: str) -> Any:  # type: ignore
+    """Call the OpenAI moderation API synchronously.
+
+    Args:
+        client: The sync OpenAI or Azure OpenAI client to use.
+        data: The text to analyze.
+
+    Returns:
+        The moderation API response.
+    """
+    return client.moderations.create(
+        model="omni-moderation-latest",
+        input=data,
+    )
+
+
 async def moderation(
     ctx: Any,
     data: str,
@@ -169,8 +195,15 @@ async def moderation(
     client = getattr(ctx, "guardrail_llm", None) if ctx is not None else None
 
     if client is not None:
+        # Determine if client is async or sync and call appropriately
+        is_async_client = isinstance(client, AsyncOpenAI | AsyncAzureOpenAI)
+
         try:
-            resp = await _call_moderation_api(client, data)
+            if is_async_client:
+                resp = await _call_moderation_api_async(client, data)
+            else:
+                # Sync client - run in thread pool to avoid blocking event loop
+                resp = await asyncio.to_thread(_call_moderation_api_sync, client, data)
         except NotFoundError as e:
             # Moderation endpoint doesn't exist on this provider (e.g., third-party)
             # Fall back to the OpenAI client
@@ -179,11 +212,11 @@ async def moderation(
                 e,
             )
             client = _get_moderation_client()
-            resp = await _call_moderation_api(client, data)
+            resp = await _call_moderation_api_async(client, data)
     else:
         # No context client, use fallback
         client = _get_moderation_client()
-        resp = await _call_moderation_api(client, data)
+        resp = await _call_moderation_api_async(client, data)
     results = resp.results or []
     if not results:
         return GuardrailResult(
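For context, here is a minimal, self-contained sketch of the dispatch pattern this change introduces: await async clients directly, and offload sync clients to a worker thread with `asyncio.to_thread` so the event loop is not blocked. The helper name `route_moderation_call` is hypothetical and not part of the module; only the `moderations.create` call and model name come from the diff above.

```python
# Hedged sketch of the sync/async dispatch idea; `route_moderation_call` is a
# hypothetical name, not part of the guardrails module.
import asyncio
from typing import Any

from openai import AsyncOpenAI, OpenAI


async def route_moderation_call(client: AsyncOpenAI | OpenAI, text: str) -> Any:
    """Call the moderation endpoint without blocking the event loop."""
    if isinstance(client, AsyncOpenAI):
        # Async client: await the coroutine directly.
        return await client.moderations.create(
            model="omni-moderation-latest", input=text
        )
    # Sync client: run the blocking HTTP call in a worker thread.
    return await asyncio.to_thread(
        client.moderations.create, model="omni-moderation-latest", input=text
    )
```

In the actual check, a `NotFoundError` from a provider that lacks a moderation endpoint triggers a fallback to the default OpenAI client, as shown in the diff above.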