From feea6f169384ef2526c6b4302eab1ae3de0880a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nathana=C3=ABl=20HANNEBERT?= Date: Tue, 3 Mar 2026 10:22:13 +0100 Subject: [PATCH 1/4] feat(openai): add OpenAI STT provider support The application only supports Gladia as the STT backend. Users who already have an OpenAI API key, or who run a self-hosted OpenAI-compatible Whisper server, cannot use the application without signing up for Gladia. Add an OpenAI STT provider backed by livekit-agents[openai]. A new STT_PROVIDER env var (default: "gladia") selects the backend at startup. When set to "openai", an OpenAiSttAgent is used instead of GladiaSttAgent. Both agents implement the same EventEmitter interface, so main.py requires only minimal changes (provider selection + using an active_stt_config for confidence thresholds). Key differences from the Gladia agent: - update_locale_for_user() stops and restarts the pipeline instead of calling stream.update_options() (not supported by the OpenAI plugin). - Confidence thresholds default to 0.0 because OpenAI STT does not report per-utterance confidence scores. - alternative.language may be None; fall back to original_lang so the locale-mapping logic does not break. New env vars: OPENAI_API_KEY, OPENAI_STT_MODEL, OPENAI_BASE_URL, OPENAI_INTERIM_RESULTS, OPENAI_MIN_CONFIDENCE_FINAL/INTERIM. OPENAI_BASE_URL allows pointing at any OpenAI-compatible endpoint (e.g. a local faster-whisper server). 
--- .env.example | 26 ++ CHANGELOG.md | 1 + README.md | 50 +++- config.py | 31 ++ main.py | 31 +- openai_stt_agent.py | 221 +++++++++++++++ pyproject.toml | 4 +- tests/integration/test_openai_stt.py | 94 +++++++ tests/test_config.py | 99 +++++++ tests/test_openai_stt_agent.py | 407 +++++++++++++++++++++++++++ uv.lock | 189 ++++++++++++- 11 files changed, 1132 insertions(+), 21 deletions(-) create mode 100644 openai_stt_agent.py create mode 100644 tests/integration/test_openai_stt.py create mode 100644 tests/test_openai_stt_agent.py diff --git a/.env.example b/.env.example index 4166e29..d36727f 100644 --- a/.env.example +++ b/.env.example @@ -6,6 +6,12 @@ REDIS_HOST=127.0.0.1 REDIS_PORT=6789 REDIS_PASSWORD= +# STT provider: "gladia" (default) or "openai" +#STT_PROVIDER=gladia + +# ============================================================================= +# --- Gladia STT (STT_PROVIDER=gladia) --- +# ============================================================================= GLADIA_API_KEY= # The following env vars serves as a translation locale mapper between # (Gladia) and - (BBB) locale formats. @@ -55,3 +61,23 @@ GLADIA_TRANSLATION_LANG_MAP="de:de-DE,en:en-US,es:es-ES,fr:fr-FR,hi:hi-IN,it:it- #GLADIA_PRE_PROCESSING_AUDIO_ENHANCER=false #GLADIA_PRE_PROCESSING_SPEECH_THRESHOLD=0.5 + +# ============================================================================= +# --- OpenAI STT (STT_PROVIDER=openai) --- +# Supports the official OpenAI API and any OpenAI-compatible endpoint. +# ============================================================================= + +# OpenAI API key (required) +#OPENAI_API_KEY= + +# Transcription model (default: gpt-4o-transcribe; use "whisper-1" for classic Whisper) +#OPENAI_STT_MODEL=gpt-4o-transcribe + +# Base URL override — set this to use a compatible provider (e.g. 
a local Whisper server) +#OPENAI_BASE_URL= + +#OPENAI_INTERIM_RESULTS=false + +# Minimum confidence thresholds (OpenAI does not report confidence; default 0.0 = no filtering) +#OPENAI_MIN_CONFIDENCE_FINAL=0.0 +#OPENAI_MIN_CONFIDENCE_INTERIM=0.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 95f44ee..71d021b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ Final releases will consolidate all intermediate changes in chronological order. ## UNRELEASED +* feat(openai): add OpenAI STT provider support (official and compatible endpoints) * feat(tests): add unit and integration tests with pytest * feat(tests): add coverage reporting with pytest-cov * feat(tests): add tests for v0.2.0 changes (utils coercions, config redaction, on_track_subscribed fix, new defaults) diff --git a/README.md b/README.md index 4327e60..30a4bad 100644 --- a/README.md +++ b/README.md @@ -3,10 +3,10 @@ This application provides Speech-to-Text (STT) for BigBlueButton meetings using LiveKit as their audio bridge. -Initially, the only supported STT engine is Gladia through the official [LiveKit Gladia Plugin](https://docs.livekit.io/agents/integrations/stt/gladia/). +Supported STT engines: -It'll be expanded in the future to support other STT plugins from the LiveKit Agents -ecosystem. +- **Gladia** — via the official [LiveKit Gladia plugin](https://docs.livekit.io/agents/integrations/stt/gladia/) (default) +- **OpenAI** — via the [LiveKit OpenAI plugin](https://docs.livekit.io/agents/models/stt/openai/); supports the official OpenAI API and any OpenAI-compatible endpoint ## Getting Started @@ -14,7 +14,7 @@ ecosystem. - Python 3.10+ - A LiveKit instance -- A Gladia API key +- A Gladia API key **or** an OpenAI API key (depending on your chosen STT provider) - uv: - See installation instructions: https://docs.astral.sh/uv/getting-started/installation/ @@ -48,13 +48,17 @@ ecosystem. LIVEKIT_API_KEY=... LIVEKIT_API_SECRET=... 
- # Gladia API Key + # For Gladia (default provider): GLADIA_API_KEY=... + + # For OpenAI (set STT_PROVIDER=openai): + # STT_PROVIDER=openai + # OPENAI_API_KEY=... ``` Feel free to check `.env.example` for any other configurations of interest. - **All options ingested by the Gladia STT plugin are exposed via env vars**. + **All options ingested by the Gladia and OpenAI STT plugins are exposed via env vars**. ### Running @@ -98,6 +102,30 @@ docker run --network host --rm -it --env-file .env bbb-livekit-stt Pre-built images are available via GitHub Container Registry as well. +### OpenAI STT provider + +Set `STT_PROVIDER=openai` to use OpenAI STT instead of Gladia. + +**Official OpenAI API:** + +```bash +STT_PROVIDER=openai +OPENAI_API_KEY=your-key +# OPENAI_STT_MODEL=gpt-4o-transcribe # default; use "whisper-1" for classic Whisper +``` + +**OpenAI-compatible endpoint** (e.g. a self-hosted Whisper server): + +```bash +STT_PROVIDER=openai +OPENAI_API_KEY=any-value +OPENAI_BASE_URL=http://your-server:8000 +OPENAI_STT_MODEL=your-model-name +``` + +> **Note**: OpenAI STT does not support real-time translation. Only the original +> transcript language is returned, matching the user's BBB speech locale. + ### Development #### Testing @@ -114,12 +142,20 @@ Run with coverage: uv run pytest tests/ --ignore=tests/integration --cov --cov-report=term-missing ``` -Integration tests require a real Gladia API key and make live requests to the Gladia service. Set `GLADIA_API_KEY` and run: +Integration tests require a real API key and make live requests to the STT service. + +For Gladia, set `GLADIA_API_KEY` and run: ```bash GLADIA_API_KEY=your-key uv run pytest tests/integration -m integration ``` +For OpenAI, set `OPENAI_API_KEY` and run: + +```bash +OPENAI_API_KEY=your-key uv run pytest tests/integration -m integration +``` + #### Linting This project uses [ruff](https://docs.astral.sh/ruff/) for linting and formatting. 
To check for issues: diff --git a/config.py b/config.py index f08f405..e49f05d 100644 --- a/config.py +++ b/config.py @@ -223,6 +223,35 @@ def to_dict(self): gladia_config = GladiaConfig() +@dataclass +class OpenAiConfig: + api_key: str | None = field(default_factory=lambda: os.getenv("OPENAI_API_KEY")) + model: str = field( + default_factory=lambda: os.getenv("OPENAI_STT_MODEL", "gpt-4o-transcribe") + ) + base_url: str | None = field( + default_factory=lambda: os.getenv("OPENAI_BASE_URL", None) + ) + # OpenAI STT does not return confidence scores; default 0.0 disables filtering + min_confidence_final: float = field( + default_factory=lambda: _get_float_env("OPENAI_MIN_CONFIDENCE_FINAL", 0.0) + ) + min_confidence_interim: float = field( + default_factory=lambda: _get_float_env("OPENAI_MIN_CONFIDENCE_INTERIM", 0.0) + ) + interim_results: bool | None = field( + default_factory=lambda: _get_bool_env("OPENAI_INTERIM_RESULTS", None) + ) + + def to_dict(self): + data = {"api_key": self.api_key, "model": self.model, "base_url": self.base_url} + return {k: v for k, v in data.items() if v is not None} + + +openai_config = OpenAiConfig() +stt_provider = os.getenv("STT_PROVIDER", "gladia").lower() + + def redact_config_values(value: object, key: str | None = None) -> object: if key and key.lower() in REDACTED_CONFIG_KEYS: return "***REDACTED***" if value not in (None, "") else value @@ -238,7 +267,9 @@ def redact_config_values(value: object, key: str | None = None) -> object: def get_redacted_app_config() -> Dict[str, Any]: config_payload = { + "stt_provider": stt_provider, "redis": asdict(redis_config), "gladia": asdict(gladia_config), + "openai": asdict(openai_config), } return redact_config_values(config_payload) diff --git a/main.py b/main.py index 482e0ad..b76b4f1 100644 --- a/main.py +++ b/main.py @@ -10,7 +10,13 @@ from redis_manager import RedisManager from gladia_stt_agent import GladiaSttAgent -from config import get_redacted_app_config, gladia_config, redis_config 
+from config import ( + get_redacted_app_config, + gladia_config, + openai_config, + redis_config, + stt_provider, +) from utils import coerce_min_utterance_length_seconds, coerce_partial_utterances load_dotenv() @@ -34,7 +40,14 @@ async def entrypoint(ctx: JobContext): _log_startup_configuration() redis_manager = RedisManager(redis_config) - agent = GladiaSttAgent(gladia_config) + if stt_provider == "openai": + from openai_stt_agent import OpenAiSttAgent + + agent = OpenAiSttAgent(openai_config) + active_stt_config = openai_config + else: + agent = GladiaSttAgent(gladia_config) + active_stt_config = gladia_config async def on_redis_message(message_data: str): try: @@ -108,15 +121,16 @@ async def on_final_transcript( for alternative in event.alternatives: if _is_below_min_confidence( - alternative, gladia_config.min_confidence_final + alternative, active_stt_config.min_confidence_final ): logging.debug( f"Discarding final transcript for {participant.identity}: " - f"low confidence ({alternative.confidence} < {gladia_config.min_confidence_final})." + f"low confidence ({alternative.confidence} < {active_stt_config.min_confidence_final})." ) continue - transcript_lang = alternative.language + # OpenAI STT may not report a language; fall back to the original lang. + transcript_lang = alternative.language or original_lang text = alternative.text bbb_locale = None start_time_adjusted = math.floor(open_time + alternative.start_time) @@ -186,15 +200,16 @@ async def on_interim_transcript( for alternative in event.alternatives: if _is_below_min_confidence( - alternative, gladia_config.min_confidence_interim + alternative, active_stt_config.min_confidence_interim ): logging.debug( f"Discarding interim transcript for {participant.identity}: " - f"low confidence ({alternative.confidence} < {gladia_config.min_confidence_interim})." + f"low confidence ({alternative.confidence} < {active_stt_config.min_confidence_interim})." 
) continue - transcript_lang = alternative.language + # OpenAI STT may not report a language; fall back to the original lang. + transcript_lang = alternative.language or original_lang text = alternative.text start_time_adjusted = math.floor(open_time + alternative.start_time) end_time_adjusted = math.floor(open_time + alternative.end_time) diff --git a/openai_stt_agent.py b/openai_stt_agent.py new file mode 100644 index 0000000..ea4941d --- /dev/null +++ b/openai_stt_agent.py @@ -0,0 +1,221 @@ +import asyncio +import logging +import time + +from livekit import rtc +from livekit.agents import ( + AutoSubscribe, + JobContext, + stt, +) +from livekit.plugins import openai as openai_plugin + +from config import OpenAiConfig +from events import EventEmitter + + +class OpenAiSttAgent(EventEmitter): + def __init__(self, config: OpenAiConfig): + super().__init__() + self.config = config + self.stt = openai_plugin.STT(**config.to_dict()) + self.ctx: JobContext | None = None + self.room: rtc.Room | None = None + self.processing_info = {} + self.participant_settings = {} + self.open_time = time.time() + self._shutdown = asyncio.Event() + + async def start(self, ctx: JobContext): + self.ctx = ctx + await self.ctx.connect(auto_subscribe=AutoSubscribe.AUDIO_ONLY) + self.room = self.ctx.room + + self.room.on("participant_disconnected", self._on_participant_disconnected) + self.room.on("disconnected", self._on_disconnected) + self.room.on("track_subscribed", self._on_track_subscribed) + self.room.on("track_unsubscribed", self._on_track_unsubscribed) + + try: + await self._shutdown.wait() + finally: + await self._cleanup() + + async def _cleanup(self): + for user_id in list(self.processing_info.keys()): + self.stop_transcription_for_user(user_id) + + await asyncio.sleep(0.1) + + def start_transcription_for_user(self, user_id: str, locale: str, provider: str): + settings = self.participant_settings.setdefault(user_id, {}) + settings["locale"] = locale + settings["provider"] = 
provider + + participant = self._find_participant(user_id) + + if not participant: + logging.error( + f"Cannot start transcription, participant {user_id} not found." + ) + return + + track = self._find_audio_track(participant) + + if not track: + logging.warning( + f"Won't start transcription yet, no audio track found for {user_id}." + ) + return + + if participant.identity in self.processing_info: + logging.debug( + f"Transcription task already running for {participant.identity}, ignoring start command." + ) + return + + openai_locale = self._sanitize_locale(locale) + stt_stream = self.stt.stream(language=openai_locale) + task = asyncio.create_task( + self._run_transcription_pipeline(participant, track, stt_stream) + ) + self.processing_info[participant.identity] = { + "stream": stt_stream, + "task": task, + } + logging.info( + f"Started transcription for {participant.identity} with locale {locale}." + ) + + def stop_transcription_for_user(self, user_id: str): + logging.debug(f"Stopping transcription for {user_id}.") + + if user_id in self.processing_info: + info = self.processing_info.pop(user_id) + info["task"].cancel() + logging.info(f"Stopped transcription for user {user_id}.") + + def update_locale_for_user(self, user_id: str, locale: str): + if user_id in self.participant_settings: + self.participant_settings[user_id]["locale"] = locale + + if user_id in self.processing_info: + logging.info(f"Updating locale to '{locale}' for user {user_id}.") + provider = self.participant_settings.get(user_id, {}).get( + "provider", "openai" + ) + # OpenAI STT does not support live stream.update_options(); restart the pipeline. + self.stop_transcription_for_user(user_id) + self.start_transcription_for_user(user_id, locale, provider) + else: + logging.warning( + f"Won't update locale, no active transcription for user {user_id}." 
+ ) + + def _on_track_subscribed( + self, + track: rtc.Track, + publication: rtc.TrackPublication, + participant: rtc.RemoteParticipant, + ): + if publication.source != rtc.TrackSource.SOURCE_MICROPHONE: + logging.debug( + f"Skipping transcription for {participant.identity}'s track {track.sid} because it's not a microphone." + ) + return + + settings = self.participant_settings.get(participant.identity) + + locale = settings.get("locale") if settings else None + provider = settings.get("provider") if settings else None + + if locale and provider: + logging.debug( + f"Participant {participant.identity} subscribed with active settings, starting transcription.", + extra={"settings": settings}, + ) + self.start_transcription_for_user(participant.identity, locale, provider) + else: + logging.debug( + f"Participant {participant.identity} subscribed with no active settings, skipping transcription." + ) + + def _on_track_unsubscribed( + self, + track: rtc.Track, + publication: rtc.TrackPublication, + participant: rtc.RemoteParticipant, + ): + self.stop_transcription_for_user(participant.identity) + + def _on_participant_disconnected(self, participant: rtc.RemoteParticipant, *_): + logging.debug( + f"Participant {participant.identity} disconnected, stopping transcription." + ) + self.stop_transcription_for_user(participant.identity) + self.participant_settings.pop(participant.identity, None) + + def _on_disconnected(self): + self._shutdown.set() + + def _find_participant(self, identity: str) -> rtc.RemoteParticipant | None: + for p in self.room.remote_participants.values(): + if p.identity == identity: + return p + return None + + def _find_audio_track(self, participant: rtc.RemoteParticipant) -> rtc.Track | None: + for pub in participant.track_publications.values(): + if pub.track and pub.track.kind == rtc.TrackKind.KIND_AUDIO: + return pub.track + return None + + def _sanitize_locale(self, locale: str) -> str: + # OpenAI STT accepts ISO 639-1 language codes (e.g. 
"en") + # BBB uses - format (e.g. "en-US") + return locale.split("-")[0].lower() + + async def _run_transcription_pipeline( + self, + participant: rtc.RemoteParticipant, + track: rtc.Track, + stt_stream: stt.SpeechStream, + ): + audio_stream = rtc.AudioStream(track) + self.open_time = time.time() + + async def forward_audio_task(): + try: + async for audio_event in audio_stream: + stt_stream.push_frame(audio_event.frame) + finally: + stt_stream.flush() + + async def process_stt_task(): + async for event in stt_stream: + if event.type == stt.SpeechEventType.FINAL_TRANSCRIPT: + self.emit( + "final_transcript", + participant=participant, + event=event, + open_time=self.open_time, + ) + elif ( + event.type == stt.SpeechEventType.INTERIM_TRANSCRIPT + and self.config.interim_results + ): + self.emit( + "interim_transcript", + participant=participant, + event=event, + open_time=self.open_time, + ) + + try: + await asyncio.gather(forward_audio_task(), process_stt_task()) + except asyncio.CancelledError: + logging.info(f"Transcription for {participant.identity} was cancelled.") + except Exception as e: + logging.error(f"Error during transcription for track {track.sid}: {e}") + finally: + self.processing_info.pop(participant.identity, None) diff --git a/pyproject.toml b/pyproject.toml index ad9fe3a..56d588d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ description = "Provides STT for BigBlueButton meetings using LiveKit as their au readme = "README.md" requires-python = ">=3.10" dependencies = [ - "livekit-agents[gladia]~=1.4", + "livekit-agents[gladia,openai]~=1.4", "python-dotenv~=1.1.1", "redis~=6.4.0", "nest-asyncio~=1.6.0", @@ -25,7 +25,7 @@ testpaths = ["tests"] pythonpath = ["."] [tool.coverage.run] -source = ["config", "events", "gladia_stt_agent", "redis_manager", "utils"] +source = ["config", "events", "gladia_stt_agent", "openai_stt_agent", "redis_manager", "utils"] omit = ["tests/*"] [tool.coverage.report] diff --git 
a/tests/integration/test_openai_stt.py b/tests/integration/test_openai_stt.py new file mode 100644 index 0000000..b5edf6c --- /dev/null +++ b/tests/integration/test_openai_stt.py @@ -0,0 +1,94 @@ +"""Integration tests for the OpenAI STT pipeline. + +These tests require a valid OPENAI_API_KEY environment variable and make real +requests to the OpenAI transcription service. They are skipped automatically +when the key is absent. +""" + +import asyncio +import os + +import pytest +from livekit import rtc +from livekit.agents import stt +from livekit.plugins import openai as openai_plugin + +pytestmark = pytest.mark.skipif( + not os.environ.get("OPENAI_API_KEY"), + reason="OPENAI_API_KEY environment variable is not set", +) + + +@pytest.mark.integration +@pytest.mark.usefixtures("job_process") +async def test_openai_stt_stream_opens_and_closes(): + """Verify that an OpenAI STT stream can be created and closed without errors.""" + api_key = os.environ["OPENAI_API_KEY"] + model = os.environ.get("OPENAI_STT_MODEL", "gpt-4o-transcribe") + base_url = os.environ.get("OPENAI_BASE_URL") + + kwargs = {"api_key": api_key, "model": model} + if base_url: + kwargs["base_url"] = base_url + + async with openai_plugin.STT(**kwargs) as openai_stt: + stream = openai_stt.stream(language="en") + await stream.aclose() + + +@pytest.mark.integration +@pytest.mark.usefixtures("job_process") +async def test_openai_stt_stream_accepts_silent_audio(): + """Verify that the OpenAI STT stream processes silent PCM audio without errors. + + This tests end-to-end connectivity: frames are pushed through the STT + stream, the stream is flushed, and no exceptions are raised. Silent audio + is expected to produce no transcript events. 
+ """ + api_key = os.environ["OPENAI_API_KEY"] + model = os.environ.get("OPENAI_STT_MODEL", "gpt-4o-transcribe") + base_url = os.environ.get("OPENAI_BASE_URL") + + kwargs = {"api_key": api_key, "model": model} + if base_url: + kwargs["base_url"] = base_url + + async with openai_plugin.STT(**kwargs) as openai_stt: + stream = openai_stt.stream(language="en") + + # Build a 100 ms silent PCM frame (16-bit mono @ 16 kHz → 1600 samples) + samples_per_frame = 1600 + silent_frame = rtc.AudioFrame( + data=bytes(samples_per_frame * 2), # 2 bytes per int16 sample + sample_rate=16000, + num_channels=1, + samples_per_channel=samples_per_frame, + ) + + events_received = [] + + async def collect_events(): + async for event in stream: + events_received.append(event) + + collector = asyncio.create_task(collect_events()) + + # Push 500 ms of silence in five 100 ms chunks + for _ in range(5): + stream.push_frame(silent_frame) + stream.flush() + + # Give the service a moment to respond, then close + await asyncio.sleep(3) + await stream.aclose() + collector.cancel() + try: + await collector + except asyncio.CancelledError: + pass + + # Silent audio should not produce any FINAL_TRANSCRIPT events + final_transcripts = [ + e for e in events_received if e.type == stt.SpeechEventType.FINAL_TRANSCRIPT + ] + assert len(final_transcripts) == 0 diff --git a/tests/test_config.py b/tests/test_config.py index 8f16832..0bdcf5a 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -4,6 +4,7 @@ from config import ( GladiaConfig, + OpenAiConfig, _get_bool_env, _get_float_env, _get_json_env, @@ -223,3 +224,101 @@ def test_min_confidence_interim_overrides_base(self, monkeypatch): config = GladiaConfig() assert config.min_confidence_interim == pytest.approx(0.2) assert config.min_confidence_final == pytest.approx(0.5) + + +class TestOpenAiConfigDefaults: + @pytest.fixture(autouse=True) + def _clean_openai_env(self, monkeypatch): + """Remove all OPENAI_* env vars so dataclass defaults are 
exercised.""" + for key in list(os.environ): + if key.startswith("OPENAI_"): + monkeypatch.delenv(key, raising=False) + + def test_model_defaults_to_gpt4o_transcribe(self): + config = OpenAiConfig() + assert config.model == "gpt-4o-transcribe" + + def test_api_key_defaults_to_none(self): + config = OpenAiConfig() + assert config.api_key is None + + def test_base_url_defaults_to_none(self): + config = OpenAiConfig() + assert config.base_url is None + + def test_min_confidence_defaults_to_0_0(self): + config = OpenAiConfig() + assert config.min_confidence_final == pytest.approx(0.0) + assert config.min_confidence_interim == pytest.approx(0.0) + + def test_interim_results_defaults_to_none(self): + config = OpenAiConfig() + assert config.interim_results is None + + def test_model_overridden_by_env_var(self, monkeypatch): + monkeypatch.setenv("OPENAI_STT_MODEL", "whisper-1") + config = OpenAiConfig() + assert config.model == "whisper-1" + + def test_base_url_overridden_by_env_var(self, monkeypatch): + monkeypatch.setenv("OPENAI_BASE_URL", "http://localhost:8000") + config = OpenAiConfig() + assert config.base_url == "http://localhost:8000" + + def test_api_key_overridden_by_env_var(self, monkeypatch): + monkeypatch.setenv("OPENAI_API_KEY", "sk-test") + config = OpenAiConfig() + assert config.api_key == "sk-test" + + def test_min_confidence_overridden_by_env_var(self, monkeypatch): + monkeypatch.setenv("OPENAI_MIN_CONFIDENCE_FINAL", "0.8") + monkeypatch.setenv("OPENAI_MIN_CONFIDENCE_INTERIM", "0.4") + config = OpenAiConfig() + assert config.min_confidence_final == pytest.approx(0.8) + assert config.min_confidence_interim == pytest.approx(0.4) + + +class TestOpenAiConfigToDict: + def test_excludes_none_fields(self): + config = OpenAiConfig(api_key=None, base_url=None, model="whisper-1") + result = config.to_dict() + assert "api_key" not in result + assert "base_url" not in result + + def test_includes_model(self): + config = OpenAiConfig(model="gpt-4o-transcribe") + 
result = config.to_dict() + assert result["model"] == "gpt-4o-transcribe" + + def test_includes_api_key_when_set(self): + config = OpenAiConfig(api_key="sk-test", model="whisper-1") + result = config.to_dict() + assert result["api_key"] == "sk-test" + + def test_includes_base_url_when_set(self): + config = OpenAiConfig(model="whisper-1", base_url="http://localhost:8000") + result = config.to_dict() + assert result["base_url"] == "http://localhost:8000" + + def test_does_not_include_confidence_thresholds(self): + """min_confidence_* are internal; not passed to the LiveKit plugin.""" + config = OpenAiConfig(model="whisper-1") + result = config.to_dict() + assert "min_confidence_final" not in result + assert "min_confidence_interim" not in result + assert "interim_results" not in result + + +class TestSttProvider: + def test_defaults_to_gladia(self, monkeypatch): + monkeypatch.delenv("STT_PROVIDER", raising=False) + # Re-evaluate the module-level expression via direct env check + assert os.getenv("STT_PROVIDER", "gladia").lower() == "gladia" + + def test_openai_when_env_set(self, monkeypatch): + monkeypatch.setenv("STT_PROVIDER", "openai") + assert os.getenv("STT_PROVIDER", "gladia").lower() == "openai" + + def test_case_insensitive(self, monkeypatch): + monkeypatch.setenv("STT_PROVIDER", "OpenAI") + assert os.getenv("STT_PROVIDER", "gladia").lower() == "openai" diff --git a/tests/test_openai_stt_agent.py b/tests/test_openai_stt_agent.py new file mode 100644 index 0000000..44bf028 --- /dev/null +++ b/tests/test_openai_stt_agent.py @@ -0,0 +1,407 @@ +import asyncio +import contextlib +import logging +from unittest.mock import AsyncMock, MagicMock, patch + +from livekit import rtc +from livekit.agents import stt + +from config import OpenAiConfig +from openai_stt_agent import OpenAiSttAgent + + +def _make_agent(interim_results=None, **kwargs): + config = OpenAiConfig(api_key="fake-key", interim_results=interim_results, **kwargs) + with 
patch("openai_stt_agent.openai_plugin") as mock_plugin: + mock_plugin.STT.return_value = MagicMock() + agent = OpenAiSttAgent(config) + return agent + + +def _make_track_subscribed_args(source=rtc.TrackSource.SOURCE_MICROPHONE): + mock_track = MagicMock() + mock_publication = MagicMock() + mock_publication.source = source + mock_participant = MagicMock() + return mock_track, mock_publication, mock_participant + + +def _make_agent_with_room(interim_results=None, participants=None, **kwargs): + """Create an agent with a mocked room containing the given participants.""" + agent = _make_agent(interim_results=interim_results, **kwargs) + mock_room = MagicMock() + participants = participants or {} + mock_room.remote_participants = participants + agent.room = mock_room + return agent + + +def _make_participant(identity, audio_track=None): + """Create a mock RemoteParticipant with an optional audio track.""" + participant = MagicMock(spec=rtc.RemoteParticipant) + participant.identity = identity + pubs = {} + if audio_track: + pub = MagicMock() + pub.track = audio_track + pub.track.kind = rtc.TrackKind.KIND_AUDIO + pubs["audio"] = pub + participant.track_publications = pubs + return participant + + +class TestSanitizeLocale: + def test_strips_region_from_bcp47_locale(self): + agent = _make_agent() + assert agent._sanitize_locale("en-US") == "en" + assert agent._sanitize_locale("pt-BR") == "pt" + assert agent._sanitize_locale("zh-CN") == "zh" + assert agent._sanitize_locale("fr-FR") == "fr" + + def test_returns_language_code_unchanged_when_no_region(self): + agent = _make_agent() + assert agent._sanitize_locale("en") == "en" + assert agent._sanitize_locale("de") == "de" + + def test_lowercases_language_code(self): + agent = _make_agent() + assert agent._sanitize_locale("EN-US") == "en" + assert agent._sanitize_locale("PT") == "pt" + + +class TestStopTranscriptionForUser: + def test_cancels_task_and_removes_from_processing_info(self): + agent = _make_agent() + mock_task = 
MagicMock() + agent.processing_info["user_123"] = {"task": mock_task, "stream": MagicMock()} + + agent.stop_transcription_for_user("user_123") + + mock_task.cancel.assert_called_once() + assert "user_123" not in agent.processing_info + + def test_no_op_when_user_not_in_processing_info(self): + agent = _make_agent() + # Should not raise even if user_id is unknown + agent.stop_transcription_for_user("unknown_user") + + +class TestUpdateLocaleForUser: + def test_updates_locale_in_participant_settings(self): + agent = _make_agent_with_room() + agent.participant_settings["user_1"] = {"locale": "en", "provider": "openai"} + + agent.update_locale_for_user("user_1", "fr") + + assert agent.participant_settings["user_1"]["locale"] == "fr" + + def test_restarts_transcription_when_active(self): + """OpenAI STT requires stop+restart to change locale (no update_options).""" + mock_track = MagicMock() + mock_track.kind = rtc.TrackKind.KIND_AUDIO + participant = _make_participant("user_1", audio_track=mock_track) + agent = _make_agent_with_room(participants={"pid": participant}) + agent.participant_settings["user_1"] = {"locale": "en", "provider": "openai"} + mock_task = MagicMock() + agent.processing_info["user_1"] = {"stream": MagicMock(), "task": mock_task} + + with ( + patch.object(agent, "stop_transcription_for_user") as mock_stop, + patch.object(agent, "start_transcription_for_user") as mock_start, + ): + agent.update_locale_for_user("user_1", "de") + + mock_stop.assert_called_once_with("user_1") + mock_start.assert_called_once_with("user_1", "de", "openai") + + def test_does_not_restart_when_no_active_transcription(self): + agent = _make_agent_with_room() + agent.participant_settings["user_1"] = {"locale": "en", "provider": "openai"} + + with ( + patch.object(agent, "stop_transcription_for_user") as mock_stop, + patch.object(agent, "start_transcription_for_user") as mock_start, + ): + agent.update_locale_for_user("user_1", "fr") + + mock_stop.assert_not_called() + 
mock_start.assert_not_called() + assert agent.participant_settings["user_1"]["locale"] == "fr" + + def test_no_op_in_settings_when_user_not_in_participant_settings(self): + agent = _make_agent_with_room() + # Should not raise or create settings entry + agent.update_locale_for_user("unknown_user", "en") + assert "unknown_user" not in agent.participant_settings + + +class TestOnParticipantDisconnected: + def test_stops_transcription_and_clears_settings(self): + agent = _make_agent() + agent.participant_settings["user_1"] = {"locale": "en", "provider": "openai"} + mock_task = MagicMock() + agent.processing_info["user_1"] = {"task": mock_task, "stream": MagicMock()} + + mock_participant = MagicMock() + mock_participant.identity = "user_1" + + agent._on_participant_disconnected(mock_participant) + + mock_task.cancel.assert_called_once() + assert "user_1" not in agent.processing_info + assert "user_1" not in agent.participant_settings + + def test_no_op_for_unknown_participant(self): + agent = _make_agent() + mock_participant = MagicMock() + mock_participant.identity = "ghost_user" + + agent._on_participant_disconnected(mock_participant) + + assert "ghost_user" not in agent.participant_settings + + +class TestOnTrackSubscribed: + def test_skips_non_microphone_tracks(self): + agent = _make_agent() + agent.participant_settings["user_1"] = {"locale": "en-US", "provider": "openai"} + mock_track, mock_publication, mock_participant = _make_track_subscribed_args( + source=rtc.TrackSource.SOURCE_CAMERA + ) + mock_participant.identity = "user_1" + + with patch.object(agent, "start_transcription_for_user") as mock_start: + agent._on_track_subscribed(mock_track, mock_publication, mock_participant) + mock_start.assert_not_called() + + def test_skips_transcription_when_no_settings(self): + agent = _make_agent() + mock_track, mock_publication, mock_participant = _make_track_subscribed_args() + mock_participant.identity = "user_no_settings" + + with patch.object(agent, 
"start_transcription_for_user") as mock_start: + agent._on_track_subscribed(mock_track, mock_publication, mock_participant) + mock_start.assert_not_called() + + def test_starts_transcription_when_locale_and_provider_present(self): + agent = _make_agent() + agent.participant_settings["user_1"] = {"locale": "en-US", "provider": "openai"} + mock_track, mock_publication, mock_participant = _make_track_subscribed_args() + mock_participant.identity = "user_1" + + with patch.object(agent, "start_transcription_for_user") as mock_start: + agent._on_track_subscribed(mock_track, mock_publication, mock_participant) + mock_start.assert_called_once_with("user_1", "en-US", "openai") + + +class TestStartTranscriptionForUser: + def test_logs_error_when_participant_not_found(self, caplog): + agent = _make_agent_with_room(participants={}) + + with caplog.at_level(logging.ERROR): + agent.start_transcription_for_user("missing_user", "en-US", "openai") + + assert "missing_user" not in agent.processing_info + assert any("not found" in r.message for r in caplog.records) + + def test_logs_warning_when_no_audio_track(self, caplog): + participant = _make_participant("user_1", audio_track=None) + agent = _make_agent_with_room(participants={"pid": participant}) + + with caplog.at_level(logging.WARNING): + agent.start_transcription_for_user("user_1", "en-US", "openai") + + assert "user_1" not in agent.processing_info + assert any("no audio track" in r.message for r in caplog.records) + + def test_skips_when_already_processing(self, caplog): + mock_track = MagicMock() + mock_track.kind = rtc.TrackKind.KIND_AUDIO + participant = _make_participant("user_1", audio_track=mock_track) + agent = _make_agent_with_room(participants={"pid": participant}) + + existing_task = MagicMock() + agent.processing_info["user_1"] = {"task": existing_task, "stream": MagicMock()} + + with caplog.at_level(logging.DEBUG): + agent.start_transcription_for_user("user_1", "en-US", "openai") + + assert 
agent.processing_info["user_1"]["task"] is existing_task + + async def test_creates_stream_and_task_on_success(self): + mock_track = MagicMock() + mock_track.kind = rtc.TrackKind.KIND_AUDIO + participant = _make_participant("user_1", audio_track=mock_track) + agent = _make_agent_with_room(participants={"pid": participant}) + + with patch("openai_stt_agent.rtc.AudioStream"): + agent.start_transcription_for_user("user_1", "en-US", "openai") + + assert "user_1" in agent.processing_info + info = agent.processing_info["user_1"] + assert "stream" in info + assert "task" in info + assert agent.participant_settings["user_1"]["locale"] == "en-US" + assert agent.participant_settings["user_1"]["provider"] == "openai" + + info["task"].cancel() + with contextlib.suppress(asyncio.CancelledError): + await info["task"] + + async def test_sanitizes_locale_before_creating_stream(self): + """Locale 'pt-BR' should be sanitized to 'pt' for OpenAI STT.""" + mock_track = MagicMock() + mock_track.kind = rtc.TrackKind.KIND_AUDIO + participant = _make_participant("user_1", audio_track=mock_track) + agent = _make_agent_with_room(participants={"pid": participant}) + + with patch("openai_stt_agent.rtc.AudioStream"): + agent.start_transcription_for_user("user_1", "pt-BR", "openai") + agent.stt.stream.assert_called_once_with(language="pt") + + agent.processing_info["user_1"]["task"].cancel() + with contextlib.suppress(asyncio.CancelledError): + await agent.processing_info["user_1"]["task"] + + +class TestRunTranscriptionPipeline: + async def test_cancellation_cleans_up_processing_info(self): + agent = _make_agent() + mock_participant = MagicMock(spec=rtc.RemoteParticipant) + mock_participant.identity = "user_1" + mock_track = MagicMock() + + mock_audio_stream = AsyncMock() + mock_audio_stream.__aiter__.side_effect = asyncio.CancelledError + + mock_stt_stream = AsyncMock() + mock_stt_stream.__aiter__.return_value = iter([]) + + agent.processing_info["user_1"] = { + "stream": mock_stt_stream, + 
"task": MagicMock(), + } + + with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): + await agent._run_transcription_pipeline( + mock_participant, mock_track, mock_stt_stream + ) + + assert "user_1" not in agent.processing_info + + async def test_emits_final_transcript_event(self): + agent = _make_agent() + mock_participant = MagicMock(spec=rtc.RemoteParticipant) + mock_participant.identity = "user_1" + mock_track = MagicMock() + + mock_audio_stream = AsyncMock() + mock_audio_stream.__aiter__.return_value = iter([]) + + mock_event = MagicMock() + mock_event.type = stt.SpeechEventType.FINAL_TRANSCRIPT + mock_stt_stream = AsyncMock() + mock_stt_stream.__aiter__.return_value = iter([mock_event]) + + emitted = [] + agent.on("final_transcript", lambda **kw: emitted.append(kw)) + + with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): + await agent._run_transcription_pipeline( + mock_participant, mock_track, mock_stt_stream + ) + await asyncio.sleep(0) + + assert len(emitted) == 1 + assert emitted[0]["participant"] is mock_participant + assert emitted[0]["event"] is mock_event + + async def test_emits_interim_transcript_when_enabled(self): + agent = _make_agent(interim_results=True) + mock_participant = MagicMock(spec=rtc.RemoteParticipant) + mock_participant.identity = "user_1" + mock_track = MagicMock() + + mock_audio_stream = AsyncMock() + mock_audio_stream.__aiter__.return_value = iter([]) + + mock_event = MagicMock() + mock_event.type = stt.SpeechEventType.INTERIM_TRANSCRIPT + mock_stt_stream = AsyncMock() + mock_stt_stream.__aiter__.return_value = iter([mock_event]) + + emitted = [] + agent.on("interim_transcript", lambda **kw: emitted.append(kw)) + + with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): + await agent._run_transcription_pipeline( + mock_participant, mock_track, mock_stt_stream + ) + await asyncio.sleep(0) + + assert len(emitted) == 1 + + async def 
test_suppresses_interim_transcript_when_disabled(self): + agent = _make_agent(interim_results=False) + mock_participant = MagicMock(spec=rtc.RemoteParticipant) + mock_participant.identity = "user_1" + mock_track = MagicMock() + + mock_audio_stream = AsyncMock() + mock_audio_stream.__aiter__.return_value = iter([]) + + mock_event = MagicMock() + mock_event.type = stt.SpeechEventType.INTERIM_TRANSCRIPT + mock_stt_stream = AsyncMock() + mock_stt_stream.__aiter__.return_value = iter([mock_event]) + + emitted = [] + agent.on("interim_transcript", lambda **kw: emitted.append(kw)) + + with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): + await agent._run_transcription_pipeline( + mock_participant, mock_track, mock_stt_stream + ) + await asyncio.sleep(0) + + assert len(emitted) == 0 + + async def test_generic_exception_cleans_up_processing_info(self): + agent = _make_agent() + mock_participant = MagicMock(spec=rtc.RemoteParticipant) + mock_participant.identity = "user_1" + mock_track = MagicMock() + + mock_audio_stream = AsyncMock() + mock_audio_stream.__aiter__.side_effect = RuntimeError("boom") + + mock_stt_stream = AsyncMock() + mock_stt_stream.__aiter__.return_value = iter([]) + + agent.processing_info["user_1"] = { + "stream": mock_stt_stream, + "task": MagicMock(), + } + + with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): + await agent._run_transcription_pipeline( + mock_participant, mock_track, mock_stt_stream + ) + + assert "user_1" not in agent.processing_info + + +class TestCleanup: + async def test_cleanup_stops_all_active_transcriptions(self): + agent = _make_agent() + tasks = {} + for uid in ("user_1", "user_2", "user_3"): + mock_task = MagicMock() + agent.processing_info[uid] = {"task": mock_task, "stream": MagicMock()} + tasks[uid] = mock_task + + await agent._cleanup() + + for uid, mock_task in tasks.items(): + mock_task.cancel.assert_called_once() + assert uid not in agent.processing_info diff --git 
a/uv.lock b/uv.lock index 23cab9b..586d4f2 100644 --- a/uv.lock +++ b/uv.lock @@ -233,7 +233,7 @@ name = "bbb-livekit-stt" version = "0.2.0" source = { virtual = "." } dependencies = [ - { name = "livekit-agents", extra = ["gladia"] }, + { name = "livekit-agents", extra = ["gladia", "openai"] }, { name = "nest-asyncio" }, { name = "python-dotenv" }, { name = "redis" }, @@ -241,15 +241,15 @@ dependencies = [ [package.dev-dependencies] dev = [ - { name = "ruff" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-cov" }, + { name = "ruff" }, ] [package.metadata] requires-dist = [ - { name = "livekit-agents", extras = ["gladia"], specifier = "~=1.4" }, + { name = "livekit-agents", extras = ["gladia", "openai"], specifier = "~=1.4" }, { name = "nest-asyncio", specifier = "~=1.6.0" }, { name = "python-dotenv", specifier = "~=1.1.1" }, { name = "redis", specifier = "~=6.4.0" }, @@ -257,10 +257,10 @@ requires-dist = [ [package.metadata.requires-dev] dev = [ - { name = "ruff", specifier = ">=0.15.4" }, { name = "pytest", specifier = ">=9.0.2" }, { name = "pytest-asyncio", specifier = ">=1.3.0" }, { name = "pytest-cov", specifier = ">=7.0.0" }, + { name = "ruff", specifier = ">=0.15.4" }, ] [[package]] @@ -957,6 +957,12 @@ codecs = [ gladia = [ { name = "livekit-plugins-gladia" }, ] +images = [ + { name = "pillow" }, +] +openai = [ + { name = "livekit-plugins-openai" }, +] [[package]] name = "livekit-api" @@ -1026,6 +1032,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fd/81/83f1a11e3b1a063559b871b8eeedca139c156cf0eba6e0d91a4083eaf8e5/livekit_plugins_gladia-1.4.2-py3-none-any.whl", hash = "sha256:8c6564bac7eaef75a834d635493c82ca76db7e736b2bd0091700ed2254ef5835", size = 15281, upload-time = "2026-02-17T01:27:04.668Z" }, ] +[[package]] +name = "livekit-plugins-openai" +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "livekit-agents", extra = ["codecs", "images"] }, + { name = "openai", extra 
= ["realtime"] }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/06/881d11f07eb61619b1cf7fb84fc20c8365169cb09a557e3d4c4ae00f8cdc/livekit_plugins_openai-1.4.2.tar.gz", hash = "sha256:f298388dcf69c38cf5bd105ee0e808e5b555d07b9e515e9d18276450f81518eb", size = 49013, upload-time = "2026-02-17T01:27:29.864Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6c/92/4d2e70713ed1a7a20de3768cbeb2ae77931e6c5fc3d555b42f6bdd913b38/livekit_plugins_openai-1.4.2-py3-none-any.whl", hash = "sha256:fbdb9d5eff41dcb725210e10d331a8866fd62db4db5e2b8f203ae9dcb68557ce", size = 56942, upload-time = "2026-02-17T01:27:28.872Z" }, +] + [[package]] name = "livekit-protocol" version = "1.1.2" @@ -1340,6 +1359,11 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cc/56/0a89092a453bb2c676d66abee44f863e742b2110d4dbb1dbcca3f7e5fc33/openai-2.21.0-py3-none-any.whl", hash = "sha256:0bc1c775e5b1536c294eded39ee08f8407656537ccc71b1004104fe1602e267c", size = 1103065, upload-time = "2026-02-14T00:11:59.603Z" }, ] +[package.optional-dependencies] +realtime = [ + { name = "websockets" }, +] + [[package]] name = "opentelemetry-api" version = "1.39.1" @@ -1462,6 +1486,104 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" }, ] +[[package]] +name = "pillow" +version = "12.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1f/42/5c74462b4fd957fcd7b13b04fb3205ff8349236ea74c7c375766d6c82288/pillow-12.1.1.tar.gz", hash = "sha256:9ad8fa5937ab05218e2b6a4cff30295ad35afd2f83ac592e68c0d871bb0fdbc4", size = 46980264, upload-time = "2026-02-11T04:23:07.146Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/1d/30/5bd3d794762481f8c8ae9c80e7b76ecea73b916959eb587521358ef0b2f9/pillow-12.1.1-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1f1625b72740fdda5d77b4def688eb8fd6490975d06b909fd19f13f391e077e0", size = 5304099, upload-time = "2026-02-11T04:20:06.13Z" }, + { url = "https://files.pythonhosted.org/packages/bd/c1/aab9e8f3eeb4490180e357955e15c2ef74b31f64790ff356c06fb6cf6d84/pillow-12.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:178aa072084bd88ec759052feca8e56cbb14a60b39322b99a049e58090479713", size = 4657880, upload-time = "2026-02-11T04:20:09.291Z" }, + { url = "https://files.pythonhosted.org/packages/f1/0a/9879e30d56815ad529d3985aeff5af4964202425c27261a6ada10f7cbf53/pillow-12.1.1-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b66e95d05ba806247aaa1561f080abc7975daf715c30780ff92a20e4ec546e1b", size = 6222587, upload-time = "2026-02-11T04:20:10.82Z" }, + { url = "https://files.pythonhosted.org/packages/5a/5f/a1b72ff7139e4f89014e8d451442c74a774d5c43cd938fb0a9f878576b37/pillow-12.1.1-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:89c7e895002bbe49cdc5426150377cbbc04767d7547ed145473f496dfa40408b", size = 8027678, upload-time = "2026-02-11T04:20:12.455Z" }, + { url = "https://files.pythonhosted.org/packages/e2/c2/c7cb187dac79a3d22c3ebeae727abee01e077c8c7d930791dc592f335153/pillow-12.1.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a5cbdcddad0af3da87cb16b60d23648bc3b51967eb07223e9fed77a82b457c4", size = 6335777, upload-time = "2026-02-11T04:20:14.441Z" }, + { url = "https://files.pythonhosted.org/packages/0c/7b/f9b09a7804ec7336effb96c26d37c29d27225783dc1501b7d62dcef6ae25/pillow-12.1.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9f51079765661884a486727f0729d29054242f74b46186026582b4e4769918e4", size = 7027140, upload-time = "2026-02-11T04:20:16.387Z" }, + { url = 
"https://files.pythonhosted.org/packages/98/b2/2fa3c391550bd421b10849d1a2144c44abcd966daadd2f7c12e19ea988c4/pillow-12.1.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:99c1506ea77c11531d75e3a412832a13a71c7ebc8192ab9e4b2e355555920e3e", size = 6449855, upload-time = "2026-02-11T04:20:18.554Z" }, + { url = "https://files.pythonhosted.org/packages/96/ff/9caf4b5b950c669263c39e96c78c0d74a342c71c4f43fd031bb5cb7ceac9/pillow-12.1.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:36341d06738a9f66c8287cf8b876d24b18db9bd8740fa0672c74e259ad408cff", size = 7151329, upload-time = "2026-02-11T04:20:20.646Z" }, + { url = "https://files.pythonhosted.org/packages/7b/f8/4b24841f582704da675ca535935bccb32b00a6da1226820845fac4a71136/pillow-12.1.1-cp310-cp310-win32.whl", hash = "sha256:6c52f062424c523d6c4db85518774cc3d50f5539dd6eed32b8f6229b26f24d40", size = 6325574, upload-time = "2026-02-11T04:20:22.43Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f9/9f6b01c0881d7036063aa6612ef04c0e2cad96be21325a1e92d0203f8e91/pillow-12.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:c6008de247150668a705a6338156efb92334113421ceecf7438a12c9a12dab23", size = 7032347, upload-time = "2026-02-11T04:20:23.932Z" }, + { url = "https://files.pythonhosted.org/packages/79/13/c7922edded3dcdaf10c59297540b72785620abc0538872c819915746757d/pillow-12.1.1-cp310-cp310-win_arm64.whl", hash = "sha256:1a9b0ee305220b392e1124a764ee4265bd063e54a751a6b62eff69992f457fa9", size = 2453457, upload-time = "2026-02-11T04:20:25.392Z" }, + { url = "https://files.pythonhosted.org/packages/2b/46/5da1ec4a5171ee7bf1a0efa064aba70ba3d6e0788ce3f5acd1375d23c8c0/pillow-12.1.1-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e879bb6cd5c73848ef3b2b48b8af9ff08c5b71ecda8048b7dd22d8a33f60be32", size = 5304084, upload-time = "2026-02-11T04:20:27.501Z" }, + { url = "https://files.pythonhosted.org/packages/78/93/a29e9bc02d1cf557a834da780ceccd54e02421627200696fcf805ebdc3fb/pillow-12.1.1-cp311-cp311-macosx_11_0_arm64.whl", 
hash = "sha256:365b10bb9417dd4498c0e3b128018c4a624dc11c7b97d8cc54effe3b096f4c38", size = 4657866, upload-time = "2026-02-11T04:20:29.827Z" }, + { url = "https://files.pythonhosted.org/packages/13/84/583a4558d492a179d31e4aae32eadce94b9acf49c0337c4ce0b70e0a01f2/pillow-12.1.1-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d4ce8e329c93845720cd2014659ca67eac35f6433fd3050393d85f3ecef0dad5", size = 6232148, upload-time = "2026-02-11T04:20:31.329Z" }, + { url = "https://files.pythonhosted.org/packages/d5/e2/53c43334bbbb2d3b938978532fbda8e62bb6e0b23a26ce8592f36bcc4987/pillow-12.1.1-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc354a04072b765eccf2204f588a7a532c9511e8b9c7f900e1b64e3e33487090", size = 8038007, upload-time = "2026-02-11T04:20:34.225Z" }, + { url = "https://files.pythonhosted.org/packages/b8/a6/3d0e79c8a9d58150dd98e199d7c1c56861027f3829a3a60b3c2784190180/pillow-12.1.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7e7976bf1910a8116b523b9f9f58bf410f3e8aa330cd9a2bb2953f9266ab49af", size = 6345418, upload-time = "2026-02-11T04:20:35.858Z" }, + { url = "https://files.pythonhosted.org/packages/a2/c8/46dfeac5825e600579157eea177be43e2f7ff4a99da9d0d0a49533509ac5/pillow-12.1.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:597bd9c8419bc7c6af5604e55847789b69123bbe25d65cc6ad3012b4f3c98d8b", size = 7034590, upload-time = "2026-02-11T04:20:37.91Z" }, + { url = "https://files.pythonhosted.org/packages/af/bf/e6f65d3db8a8bbfeaf9e13cc0417813f6319863a73de934f14b2229ada18/pillow-12.1.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2c1fc0f2ca5f96a3c8407e41cca26a16e46b21060fe6d5b099d2cb01412222f5", size = 6458655, upload-time = "2026-02-11T04:20:39.496Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c2/66091f3f34a25894ca129362e510b956ef26f8fb67a0e6417bc5744e56f1/pillow-12.1.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:578510d88c6229d735855e1f278aa305270438d36a05031dfaae5067cc8eb04d", size = 7159286, upload-time = "2026-02-11T04:20:41.139Z" }, + { url = "https://files.pythonhosted.org/packages/7b/5a/24bc8eb526a22f957d0cec6243146744966d40857e3d8deb68f7902ca6c1/pillow-12.1.1-cp311-cp311-win32.whl", hash = "sha256:7311c0a0dcadb89b36b7025dfd8326ecfa36964e29913074d47382706e516a7c", size = 6328663, upload-time = "2026-02-11T04:20:43.184Z" }, + { url = "https://files.pythonhosted.org/packages/31/03/bef822e4f2d8f9d7448c133d0a18185d3cce3e70472774fffefe8b0ed562/pillow-12.1.1-cp311-cp311-win_amd64.whl", hash = "sha256:fbfa2a7c10cc2623f412753cddf391c7f971c52ca40a3f65dc5039b2939e8563", size = 7031448, upload-time = "2026-02-11T04:20:44.696Z" }, + { url = "https://files.pythonhosted.org/packages/49/70/f76296f53610bd17b2e7d31728b8b7825e3ac3b5b3688b51f52eab7c0818/pillow-12.1.1-cp311-cp311-win_arm64.whl", hash = "sha256:b81b5e3511211631b3f672a595e3221252c90af017e399056d0faabb9538aa80", size = 2453651, upload-time = "2026-02-11T04:20:46.243Z" }, + { url = "https://files.pythonhosted.org/packages/07/d3/8df65da0d4df36b094351dce696f2989bec731d4f10e743b1c5f4da4d3bf/pillow-12.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab323b787d6e18b3d91a72fc99b1a2c28651e4358749842b8f8dfacd28ef2052", size = 5262803, upload-time = "2026-02-11T04:20:47.653Z" }, + { url = "https://files.pythonhosted.org/packages/d6/71/5026395b290ff404b836e636f51d7297e6c83beceaa87c592718747e670f/pillow-12.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:adebb5bee0f0af4909c30db0d890c773d1a92ffe83da908e2e9e720f8edf3984", size = 4657601, upload-time = "2026-02-11T04:20:49.328Z" }, + { url = "https://files.pythonhosted.org/packages/b1/2e/1001613d941c67442f745aff0f7cc66dd8df9a9c084eb497e6a543ee6f7e/pillow-12.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:bb66b7cc26f50977108790e2456b7921e773f23db5630261102233eb355a3b79", size = 6234995, upload-time = "2026-02-11T04:20:51.032Z" }, + { 
url = "https://files.pythonhosted.org/packages/07/26/246ab11455b2549b9233dbd44d358d033a2f780fa9007b61a913c5b2d24e/pillow-12.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:aee2810642b2898bb187ced9b349e95d2a7272930796e022efaf12e99dccd293", size = 8045012, upload-time = "2026-02-11T04:20:52.882Z" }, + { url = "https://files.pythonhosted.org/packages/b2/8b/07587069c27be7535ac1fe33874e32de118fbd34e2a73b7f83436a88368c/pillow-12.1.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a0b1cd6232e2b618adcc54d9882e4e662a089d5768cd188f7c245b4c8c44a397", size = 6349638, upload-time = "2026-02-11T04:20:54.444Z" }, + { url = "https://files.pythonhosted.org/packages/ff/79/6df7b2ee763d619cda2fb4fea498e5f79d984dae304d45a8999b80d6cf5c/pillow-12.1.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7aac39bcf8d4770d089588a2e1dd111cbaa42df5a94be3114222057d68336bd0", size = 7041540, upload-time = "2026-02-11T04:20:55.97Z" }, + { url = "https://files.pythonhosted.org/packages/2c/5e/2ba19e7e7236d7529f4d873bdaf317a318896bac289abebd4bb00ef247f0/pillow-12.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ab174cd7d29a62dd139c44bf74b698039328f45cb03b4596c43473a46656b2f3", size = 6462613, upload-time = "2026-02-11T04:20:57.542Z" }, + { url = "https://files.pythonhosted.org/packages/03/03/31216ec124bb5c3dacd74ce8efff4cc7f52643653bad4825f8f08c697743/pillow-12.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:339ffdcb7cbeaa08221cd401d517d4b1fe7a9ed5d400e4a8039719238620ca35", size = 7166745, upload-time = "2026-02-11T04:20:59.196Z" }, + { url = "https://files.pythonhosted.org/packages/1f/e7/7c4552d80052337eb28653b617eafdef39adfb137c49dd7e831b8dc13bc5/pillow-12.1.1-cp312-cp312-win32.whl", hash = "sha256:5d1f9575a12bed9e9eedd9a4972834b08c97a352bd17955ccdebfeca5913fa0a", size = 6328823, upload-time = "2026-02-11T04:21:01.385Z" }, + { url = 
"https://files.pythonhosted.org/packages/3d/17/688626d192d7261bbbf98846fc98995726bddc2c945344b65bec3a29d731/pillow-12.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:21329ec8c96c6e979cd0dfd29406c40c1d52521a90544463057d2aaa937d66a6", size = 7033367, upload-time = "2026-02-11T04:21:03.536Z" }, + { url = "https://files.pythonhosted.org/packages/ed/fe/a0ef1f73f939b0eca03ee2c108d0043a87468664770612602c63266a43c4/pillow-12.1.1-cp312-cp312-win_arm64.whl", hash = "sha256:af9a332e572978f0218686636610555ae3defd1633597be015ed50289a03c523", size = 2453811, upload-time = "2026-02-11T04:21:05.116Z" }, + { url = "https://files.pythonhosted.org/packages/d5/11/6db24d4bd7685583caeae54b7009584e38da3c3d4488ed4cd25b439de486/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:d242e8ac078781f1de88bf823d70c1a9b3c7950a44cdf4b7c012e22ccbcd8e4e", size = 4062689, upload-time = "2026-02-11T04:21:06.804Z" }, + { url = "https://files.pythonhosted.org/packages/33/c0/ce6d3b1fe190f0021203e0d9b5b99e57843e345f15f9ef22fcd43842fd21/pillow-12.1.1-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:02f84dfad02693676692746df05b89cf25597560db2857363a208e393429f5e9", size = 4138535, upload-time = "2026-02-11T04:21:08.452Z" }, + { url = "https://files.pythonhosted.org/packages/a0/c6/d5eb6a4fb32a3f9c21a8c7613ec706534ea1cf9f4b3663e99f0d83f6fca8/pillow-12.1.1-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:e65498daf4b583091ccbb2556c7000abf0f3349fcd57ef7adc9a84a394ed29f6", size = 3601364, upload-time = "2026-02-11T04:21:10.194Z" }, + { url = "https://files.pythonhosted.org/packages/14/a1/16c4b823838ba4c9c52c0e6bbda903a3fe5a1bdbf1b8eb4fff7156f3e318/pillow-12.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6c6db3b84c87d48d0088943bf33440e0c42370b99b1c2a7989216f7b42eede60", size = 5262561, upload-time = "2026-02-11T04:21:11.742Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/ad/ad9dc98ff24f485008aa5cdedaf1a219876f6f6c42a4626c08bc4e80b120/pillow-12.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:8b7e5304e34942bf62e15184219a7b5ad4ff7f3bb5cca4d984f37df1a0e1aee2", size = 4657460, upload-time = "2026-02-11T04:21:13.786Z" }, + { url = "https://files.pythonhosted.org/packages/9e/1b/f1a4ea9a895b5732152789326202a82464d5254759fbacae4deea3069334/pillow-12.1.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:18e5bddd742a44b7e6b1e773ab5db102bd7a94c32555ba656e76d319d19c3850", size = 6232698, upload-time = "2026-02-11T04:21:15.949Z" }, + { url = "https://files.pythonhosted.org/packages/95/f4/86f51b8745070daf21fd2e5b1fe0eb35d4db9ca26e6d58366562fb56a743/pillow-12.1.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc44ef1f3de4f45b50ccf9136999d71abb99dca7706bc75d222ed350b9fd2289", size = 8041706, upload-time = "2026-02-11T04:21:17.723Z" }, + { url = "https://files.pythonhosted.org/packages/29/9b/d6ecd956bb1266dd1045e995cce9b8d77759e740953a1c9aad9502a0461e/pillow-12.1.1-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5a8eb7ed8d4198bccbd07058416eeec51686b498e784eda166395a23eb99138e", size = 6346621, upload-time = "2026-02-11T04:21:19.547Z" }, + { url = "https://files.pythonhosted.org/packages/71/24/538bff45bde96535d7d998c6fed1a751c75ac7c53c37c90dc2601b243893/pillow-12.1.1-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:47b94983da0c642de92ced1702c5b6c292a84bd3a8e1d1702ff923f183594717", size = 7038069, upload-time = "2026-02-11T04:21:21.378Z" }, + { url = "https://files.pythonhosted.org/packages/94/0e/58cb1a6bc48f746bc4cb3adb8cabff73e2742c92b3bf7a220b7cf69b9177/pillow-12.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:518a48c2aab7ce596d3bf79d0e275661b846e86e4d0e7dec34712c30fe07f02a", size = 6460040, upload-time = "2026-02-11T04:21:23.148Z" }, + { url = 
"https://files.pythonhosted.org/packages/6c/57/9045cb3ff11eeb6c1adce3b2d60d7d299d7b273a2e6c8381a524abfdc474/pillow-12.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a550ae29b95c6dc13cf69e2c9dc5747f814c54eeb2e32d683e5e93af56caa029", size = 7164523, upload-time = "2026-02-11T04:21:25.01Z" }, + { url = "https://files.pythonhosted.org/packages/73/f2/9be9cb99f2175f0d4dbadd6616ce1bf068ee54a28277ea1bf1fbf729c250/pillow-12.1.1-cp313-cp313-win32.whl", hash = "sha256:a003d7422449f6d1e3a34e3dd4110c22148336918ddbfc6a32581cd54b2e0b2b", size = 6332552, upload-time = "2026-02-11T04:21:27.238Z" }, + { url = "https://files.pythonhosted.org/packages/3f/eb/b0834ad8b583d7d9d42b80becff092082a1c3c156bb582590fcc973f1c7c/pillow-12.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:344cf1e3dab3be4b1fa08e449323d98a2a3f819ad20f4b22e77a0ede31f0faa1", size = 7040108, upload-time = "2026-02-11T04:21:29.462Z" }, + { url = "https://files.pythonhosted.org/packages/d5/7d/fc09634e2aabdd0feabaff4a32f4a7d97789223e7c2042fd805ea4b4d2c2/pillow-12.1.1-cp313-cp313-win_arm64.whl", hash = "sha256:5c0dd1636633e7e6a0afe7bf6a51a14992b7f8e60de5789018ebbdfae55b040a", size = 2453712, upload-time = "2026-02-11T04:21:31.072Z" }, + { url = "https://files.pythonhosted.org/packages/19/2a/b9d62794fc8a0dd14c1943df68347badbd5511103e0d04c035ffe5cf2255/pillow-12.1.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0330d233c1a0ead844fc097a7d16c0abff4c12e856c0b325f231820fee1f39da", size = 5264880, upload-time = "2026-02-11T04:21:32.865Z" }, + { url = "https://files.pythonhosted.org/packages/26/9d/e03d857d1347fa5ed9247e123fcd2a97b6220e15e9cb73ca0a8d91702c6e/pillow-12.1.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5dae5f21afb91322f2ff791895ddd8889e5e947ff59f71b46041c8ce6db790bc", size = 4660616, upload-time = "2026-02-11T04:21:34.97Z" }, + { url = 
"https://files.pythonhosted.org/packages/f7/ec/8a6d22afd02570d30954e043f09c32772bfe143ba9285e2fdb11284952cd/pillow-12.1.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e0c664be47252947d870ac0d327fea7e63985a08794758aa8af5b6cb6ec0c9c", size = 6269008, upload-time = "2026-02-11T04:21:36.623Z" }, + { url = "https://files.pythonhosted.org/packages/3d/1d/6d875422c9f28a4a361f495a5f68d9de4a66941dc2c619103ca335fa6446/pillow-12.1.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:691ab2ac363b8217f7d31b3497108fb1f50faab2f75dfb03284ec2f217e87bf8", size = 8073226, upload-time = "2026-02-11T04:21:38.585Z" }, + { url = "https://files.pythonhosted.org/packages/a1/cd/134b0b6ee5eda6dc09e25e24b40fdafe11a520bc725c1d0bbaa5e00bf95b/pillow-12.1.1-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e9e8064fb1cc019296958595f6db671fba95209e3ceb0c4734c9baf97de04b20", size = 6380136, upload-time = "2026-02-11T04:21:40.562Z" }, + { url = "https://files.pythonhosted.org/packages/7a/a9/7628f013f18f001c1b98d8fffe3452f306a70dc6aba7d931019e0492f45e/pillow-12.1.1-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:472a8d7ded663e6162dafdf20015c486a7009483ca671cece7a9279b512fcb13", size = 7067129, upload-time = "2026-02-11T04:21:42.521Z" }, + { url = "https://files.pythonhosted.org/packages/1e/f8/66ab30a2193b277785601e82ee2d49f68ea575d9637e5e234faaa98efa4c/pillow-12.1.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:89b54027a766529136a06cfebeecb3a04900397a3590fd252160b888479517bf", size = 6491807, upload-time = "2026-02-11T04:21:44.22Z" }, + { url = "https://files.pythonhosted.org/packages/da/0b/a877a6627dc8318fdb84e357c5e1a758c0941ab1ddffdafd231983788579/pillow-12.1.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:86172b0831b82ce4f7877f280055892b31179e1576aa00d0df3bb1bbf8c3e524", size = 7190954, upload-time = "2026-02-11T04:21:46.114Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/43/6f732ff85743cf746b1361b91665d9f5155e1483817f693f8d57ea93147f/pillow-12.1.1-cp313-cp313t-win32.whl", hash = "sha256:44ce27545b6efcf0fdbdceb31c9a5bdea9333e664cda58a7e674bb74608b3986", size = 6336441, upload-time = "2026-02-11T04:21:48.22Z" }, + { url = "https://files.pythonhosted.org/packages/3b/44/e865ef3986611bb75bfabdf94a590016ea327833f434558801122979cd0e/pillow-12.1.1-cp313-cp313t-win_amd64.whl", hash = "sha256:a285e3eb7a5a45a2ff504e31f4a8d1b12ef62e84e5411c6804a42197c1cf586c", size = 7045383, upload-time = "2026-02-11T04:21:50.015Z" }, + { url = "https://files.pythonhosted.org/packages/a8/c6/f4fb24268d0c6908b9f04143697ea18b0379490cb74ba9e8d41b898bd005/pillow-12.1.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cc7d296b5ea4d29e6570dabeaed58d31c3fea35a633a69679fb03d7664f43fb3", size = 2456104, upload-time = "2026-02-11T04:21:51.633Z" }, + { url = "https://files.pythonhosted.org/packages/03/d0/bebb3ffbf31c5a8e97241476c4cf8b9828954693ce6744b4a2326af3e16b/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphoneos.whl", hash = "sha256:417423db963cb4be8bac3fc1204fe61610f6abeed1580a7a2cbb2fbda20f12af", size = 4062652, upload-time = "2026-02-11T04:21:53.19Z" }, + { url = "https://files.pythonhosted.org/packages/2d/c0/0e16fb0addda4851445c28f8350d8c512f09de27bbb0d6d0bbf8b6709605/pillow-12.1.1-cp314-cp314-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:b957b71c6b2387610f556a7eb0828afbe40b4a98036fc0d2acfa5a44a0c2036f", size = 4138823, upload-time = "2026-02-11T04:22:03.088Z" }, + { url = "https://files.pythonhosted.org/packages/6b/fb/6170ec655d6f6bb6630a013dd7cf7bc218423d7b5fa9071bf63dc32175ae/pillow-12.1.1-cp314-cp314-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:097690ba1f2efdeb165a20469d59d8bb03c55fb6621eb2041a060ae8ea3e9642", size = 3601143, upload-time = "2026-02-11T04:22:04.909Z" }, + { url = 
"https://files.pythonhosted.org/packages/59/04/dc5c3f297510ba9a6837cbb318b87dd2b8f73eb41a43cc63767f65cb599c/pillow-12.1.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:2815a87ab27848db0321fb78c7f0b2c8649dee134b7f2b80c6a45c6831d75ccd", size = 5266254, upload-time = "2026-02-11T04:22:07.656Z" }, + { url = "https://files.pythonhosted.org/packages/05/30/5db1236b0d6313f03ebf97f5e17cda9ca060f524b2fcc875149a8360b21c/pillow-12.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f7ed2c6543bad5a7d5530eb9e78c53132f93dfa44a28492db88b41cdab885202", size = 4657499, upload-time = "2026-02-11T04:22:09.613Z" }, + { url = "https://files.pythonhosted.org/packages/6f/18/008d2ca0eb612e81968e8be0bbae5051efba24d52debf930126d7eaacbba/pillow-12.1.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:652a2c9ccfb556235b2b501a3a7cf3742148cd22e04b5625c5fe057ea3e3191f", size = 6232137, upload-time = "2026-02-11T04:22:11.434Z" }, + { url = "https://files.pythonhosted.org/packages/70/f1/f14d5b8eeb4b2cd62b9f9f847eb6605f103df89ef619ac68f92f748614ea/pillow-12.1.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6e4571eedf43af33d0fc233a382a76e849badbccdf1ac438841308652a08e1f", size = 8042721, upload-time = "2026-02-11T04:22:13.321Z" }, + { url = "https://files.pythonhosted.org/packages/5a/d6/17824509146e4babbdabf04d8171491fa9d776f7061ff6e727522df9bd03/pillow-12.1.1-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b574c51cf7d5d62e9be37ba446224b59a2da26dc4c1bb2ecbe936a4fb1a7cb7f", size = 6347798, upload-time = "2026-02-11T04:22:15.449Z" }, + { url = "https://files.pythonhosted.org/packages/d1/ee/c85a38a9ab92037a75615aba572c85ea51e605265036e00c5b67dfafbfe2/pillow-12.1.1-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a37691702ed687799de29a518d63d4682d9016932db66d4e90c345831b02fb4e", size = 7039315, upload-time = "2026-02-11T04:22:17.24Z" }, + { url = 
"https://files.pythonhosted.org/packages/ec/f3/bc8ccc6e08a148290d7523bde4d9a0d6c981db34631390dc6e6ec34cacf6/pillow-12.1.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f95c00d5d6700b2b890479664a06e754974848afaae5e21beb4d83c106923fd0", size = 6462360, upload-time = "2026-02-11T04:22:19.111Z" }, + { url = "https://files.pythonhosted.org/packages/f6/ab/69a42656adb1d0665ab051eec58a41f169ad295cf81ad45406963105408f/pillow-12.1.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:559b38da23606e68681337ad74622c4dbba02254fc9cb4488a305dd5975c7eeb", size = 7165438, upload-time = "2026-02-11T04:22:21.041Z" }, + { url = "https://files.pythonhosted.org/packages/02/46/81f7aa8941873f0f01d4b55cc543b0a3d03ec2ee30d617a0448bf6bd6dec/pillow-12.1.1-cp314-cp314-win32.whl", hash = "sha256:03edcc34d688572014ff223c125a3f77fb08091e4607e7745002fc214070b35f", size = 6431503, upload-time = "2026-02-11T04:22:22.833Z" }, + { url = "https://files.pythonhosted.org/packages/40/72/4c245f7d1044b67affc7f134a09ea619d4895333d35322b775b928180044/pillow-12.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:50480dcd74fa63b8e78235957d302d98d98d82ccbfac4c7e12108ba9ecbdba15", size = 7176748, upload-time = "2026-02-11T04:22:24.64Z" }, + { url = "https://files.pythonhosted.org/packages/e4/ad/8a87bdbe038c5c698736e3348af5c2194ffb872ea52f11894c95f9305435/pillow-12.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:5cb1785d97b0c3d1d1a16bc1d710c4a0049daefc4935f3a8f31f827f4d3d2e7f", size = 2544314, upload-time = "2026-02-11T04:22:26.685Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9d/efd18493f9de13b87ede7c47e69184b9e859e4427225ea962e32e56a49bc/pillow-12.1.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:1f90cff8aa76835cba5769f0b3121a22bd4eb9e6884cfe338216e557a9a548b8", size = 5268612, upload-time = "2026-02-11T04:22:29.884Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f1/4f42eb2b388eb2ffc660dcb7f7b556c1015c53ebd5f7f754965ef997585b/pillow-12.1.1-cp314-cp314t-macosx_11_0_arm64.whl", 
hash = "sha256:1f1be78ce9466a7ee64bfda57bdba0f7cc499d9794d518b854816c41bf0aa4e9", size = 4660567, upload-time = "2026-02-11T04:22:31.799Z" }, + { url = "https://files.pythonhosted.org/packages/01/54/df6ef130fa43e4b82e32624a7b821a2be1c5653a5fdad8469687a7db4e00/pillow-12.1.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:42fc1f4677106188ad9a55562bbade416f8b55456f522430fadab3cef7cd4e60", size = 6269951, upload-time = "2026-02-11T04:22:33.921Z" }, + { url = "https://files.pythonhosted.org/packages/a9/48/618752d06cc44bb4aae8ce0cd4e6426871929ed7b46215638088270d9b34/pillow-12.1.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:98edb152429ab62a1818039744d8fbb3ccab98a7c29fc3d5fcef158f3f1f68b7", size = 8074769, upload-time = "2026-02-11T04:22:35.877Z" }, + { url = "https://files.pythonhosted.org/packages/c3/bd/f1d71eb39a72fa088d938655afba3e00b38018d052752f435838961127d8/pillow-12.1.1-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d470ab1178551dd17fdba0fef463359c41aaa613cdcd7ff8373f54be629f9f8f", size = 6381358, upload-time = "2026-02-11T04:22:37.698Z" }, + { url = "https://files.pythonhosted.org/packages/64/ef/c784e20b96674ed36a5af839305f55616f8b4f8aa8eeccf8531a6e312243/pillow-12.1.1-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6408a7b064595afcab0a49393a413732a35788f2a5092fdc6266952ed67de586", size = 7068558, upload-time = "2026-02-11T04:22:39.597Z" }, + { url = "https://files.pythonhosted.org/packages/73/cb/8059688b74422ae61278202c4e1ad992e8a2e7375227be0a21c6b87ca8d5/pillow-12.1.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5d8c41325b382c07799a3682c1c258469ea2ff97103c53717b7893862d0c98ce", size = 6493028, upload-time = "2026-02-11T04:22:42.73Z" }, + { url = "https://files.pythonhosted.org/packages/c6/da/e3c008ed7d2dd1f905b15949325934510b9d1931e5df999bb15972756818/pillow-12.1.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = 
"sha256:c7697918b5be27424e9ce568193efd13d925c4481dd364e43f5dff72d33e10f8", size = 7191940, upload-time = "2026-02-11T04:22:44.543Z" }, + { url = "https://files.pythonhosted.org/packages/01/4a/9202e8d11714c1fc5951f2e1ef362f2d7fbc595e1f6717971d5dd750e969/pillow-12.1.1-cp314-cp314t-win32.whl", hash = "sha256:d2912fd8114fc5545aa3a4b5576512f64c55a03f3ebcca4c10194d593d43ea36", size = 6438736, upload-time = "2026-02-11T04:22:46.347Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ca/cbce2327eb9885476b3957b2e82eb12c866a8b16ad77392864ad601022ce/pillow-12.1.1-cp314-cp314t-win_amd64.whl", hash = "sha256:4ceb838d4bd9dab43e06c363cab2eebf63846d6a4aeaea283bbdfd8f1a8ed58b", size = 7182894, upload-time = "2026-02-11T04:22:48.114Z" }, + { url = "https://files.pythonhosted.org/packages/ec/d2/de599c95ba0a973b94410477f8bf0b6f0b5e67360eb89bcb1ad365258beb/pillow-12.1.1-cp314-cp314t-win_arm64.whl", hash = "sha256:7b03048319bfc6170e93bd60728a1af51d3dd7704935feb228c4d4faab35d334", size = 2546446, upload-time = "2026-02-11T04:22:50.342Z" }, + { url = "https://files.pythonhosted.org/packages/56/11/5d43209aa4cb58e0cc80127956ff1796a68b928e6324bbf06ef4db34367b/pillow-12.1.1-pp311-pypy311_pp73-macosx_10_15_x86_64.whl", hash = "sha256:600fd103672b925fe62ed08e0d874ea34d692474df6f4bf7ebe148b30f89f39f", size = 5228606, upload-time = "2026-02-11T04:22:52.106Z" }, + { url = "https://files.pythonhosted.org/packages/5f/d5/3b005b4e4fda6698b371fa6c21b097d4707585d7db99e98d9b0b87ac612a/pillow-12.1.1-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:665e1b916b043cef294bc54d47bf02d87e13f769bc4bc5fa225a24b3a6c5aca9", size = 4622321, upload-time = "2026-02-11T04:22:53.827Z" }, + { url = "https://files.pythonhosted.org/packages/df/36/ed3ea2d594356fd8037e5a01f6156c74bc8d92dbb0fa60746cc96cabb6e8/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:495c302af3aad1ca67420ddd5c7bd480c8867ad173528767d906428057a11f0e", size = 5247579, upload-time = 
"2026-02-11T04:22:56.094Z" }, + { url = "https://files.pythonhosted.org/packages/54/9a/9cc3e029683cf6d20ae5085da0dafc63148e3252c2f13328e553aaa13cfb/pillow-12.1.1-pp311-pypy311_pp73-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8fd420ef0c52c88b5a035a0886f367748c72147b2b8f384c9d12656678dfdfa9", size = 6989094, upload-time = "2026-02-11T04:22:58.288Z" }, + { url = "https://files.pythonhosted.org/packages/00/98/fc53ab36da80b88df0967896b6c4b4cd948a0dc5aa40a754266aa3ae48b3/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f975aa7ef9684ce7e2c18a3aa8f8e2106ce1e46b94ab713d156b2898811651d3", size = 5313850, upload-time = "2026-02-11T04:23:00.554Z" }, + { url = "https://files.pythonhosted.org/packages/30/02/00fa585abfd9fe9d73e5f6e554dc36cc2b842898cbfc46d70353dae227f8/pillow-12.1.1-pp311-pypy311_pp73-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8089c852a56c2966cf18835db62d9b34fef7ba74c726ad943928d494fa7f4735", size = 5963343, upload-time = "2026-02-11T04:23:02.934Z" }, + { url = "https://files.pythonhosted.org/packages/f2/26/c56ce33ca856e358d27fda9676c055395abddb82c35ac0f593877ed4562e/pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e", size = 7029880, upload-time = "2026-02-11T04:23:04.783Z" }, +] + [[package]] name = "pluggy" version = "1.6.0" @@ -2100,6 +2222,65 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/bd/d3/254cea30f918f489db09d6a8435a7de7047f8cb68584477a515f160541d6/watchfiles-1.1.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:923fec6e5461c42bd7e3fd5ec37492c6f3468be0499bc0707b4bbbc16ac21792", size = 454009, upload-time = "2025-06-15T19:06:52.896Z" }, ] +[[package]] +name = "websockets" +version = "15.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/21/e6/26d09fab466b7ca9c7737474c52be4f76a40301b08362eb2dbc19dcc16c1/websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee", size = 177016, upload-time = "2025-03-05T20:03:41.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/da/6462a9f510c0c49837bbc9345aca92d767a56c1fb2939e1579df1e1cdcf7/websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b", size = 175423, upload-time = "2025-03-05T20:01:35.363Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9f/9d11c1a4eb046a9e106483b9ff69bce7ac880443f00e5ce64261b47b07e7/websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205", size = 173080, upload-time = "2025-03-05T20:01:37.304Z" }, + { url = "https://files.pythonhosted.org/packages/d5/4f/b462242432d93ea45f297b6179c7333dd0402b855a912a04e7fc61c0d71f/websockets-15.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5756779642579d902eed757b21b0164cd6fe338506a8083eb58af5c372e39d9a", size = 173329, upload-time = "2025-03-05T20:01:39.668Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0c/6afa1f4644d7ed50284ac59cc70ef8abd44ccf7d45850d989ea7310538d0/websockets-15.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fdfe3e2a29e4db3659dbd5bbf04560cea53dd9610273917799f1cde46aa725e", size = 182312, upload-time = "2025-03-05T20:01:41.815Z" }, + { url = "https://files.pythonhosted.org/packages/dd/d4/ffc8bd1350b229ca7a4db2a3e1c482cf87cea1baccd0ef3e72bc720caeec/websockets-15.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4c2529b320eb9e35af0fa3016c187dffb84a3ecc572bcee7c3ce302bfeba52bf", size = 181319, upload-time = "2025-03-05T20:01:43.967Z" }, + { url = 
"https://files.pythonhosted.org/packages/97/3a/5323a6bb94917af13bbb34009fac01e55c51dfde354f63692bf2533ffbc2/websockets-15.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac1e5c9054fe23226fb11e05a6e630837f074174c4c2f0fe442996112a6de4fb", size = 181631, upload-time = "2025-03-05T20:01:46.104Z" }, + { url = "https://files.pythonhosted.org/packages/a6/cc/1aeb0f7cee59ef065724041bb7ed667b6ab1eeffe5141696cccec2687b66/websockets-15.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5df592cd503496351d6dc14f7cdad49f268d8e618f80dce0cd5a36b93c3fc08d", size = 182016, upload-time = "2025-03-05T20:01:47.603Z" }, + { url = "https://files.pythonhosted.org/packages/79/f9/c86f8f7af208e4161a7f7e02774e9d0a81c632ae76db2ff22549e1718a51/websockets-15.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0a34631031a8f05657e8e90903e656959234f3a04552259458aac0b0f9ae6fd9", size = 181426, upload-time = "2025-03-05T20:01:48.949Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b9/828b0bc6753db905b91df6ae477c0b14a141090df64fb17f8a9d7e3516cf/websockets-15.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3d00075aa65772e7ce9e990cab3ff1de702aa09be3940d1dc88d5abf1ab8a09c", size = 181360, upload-time = "2025-03-05T20:01:50.938Z" }, + { url = "https://files.pythonhosted.org/packages/89/fb/250f5533ec468ba6327055b7d98b9df056fb1ce623b8b6aaafb30b55d02e/websockets-15.0.1-cp310-cp310-win32.whl", hash = "sha256:1234d4ef35db82f5446dca8e35a7da7964d02c127b095e172e54397fb6a6c256", size = 176388, upload-time = "2025-03-05T20:01:52.213Z" }, + { url = "https://files.pythonhosted.org/packages/1c/46/aca7082012768bb98e5608f01658ff3ac8437e563eca41cf068bd5849a5e/websockets-15.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:39c1fec2c11dc8d89bba6b2bf1556af381611a173ac2b511cf7231622058af41", size = 176830, upload-time = "2025-03-05T20:01:53.922Z" }, + { url = 
"https://files.pythonhosted.org/packages/9f/32/18fcd5919c293a398db67443acd33fde142f283853076049824fc58e6f75/websockets-15.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:823c248b690b2fd9303ba00c4f66cd5e2d8c3ba4aa968b2779be9532a4dad431", size = 175423, upload-time = "2025-03-05T20:01:56.276Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/ba1ad96b07869275ef42e2ce21f07a5b0148936688c2baf7e4a1f60d5058/websockets-15.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678999709e68425ae2593acf2e3ebcbcf2e69885a5ee78f9eb80e6e371f1bf57", size = 173082, upload-time = "2025-03-05T20:01:57.563Z" }, + { url = "https://files.pythonhosted.org/packages/86/f2/10b55821dd40eb696ce4704a87d57774696f9451108cff0d2824c97e0f97/websockets-15.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d50fd1ee42388dcfb2b3676132c78116490976f1300da28eb629272d5d93e905", size = 173330, upload-time = "2025-03-05T20:01:59.063Z" }, + { url = "https://files.pythonhosted.org/packages/a5/90/1c37ae8b8a113d3daf1065222b6af61cc44102da95388ac0018fcb7d93d9/websockets-15.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d99e5546bf73dbad5bf3547174cd6cb8ba7273062a23808ffea025ecb1cf8562", size = 182878, upload-time = "2025-03-05T20:02:00.305Z" }, + { url = "https://files.pythonhosted.org/packages/8e/8d/96e8e288b2a41dffafb78e8904ea7367ee4f891dafc2ab8d87e2124cb3d3/websockets-15.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:66dd88c918e3287efc22409d426c8f729688d89a0c587c88971a0faa2c2f3792", size = 181883, upload-time = "2025-03-05T20:02:03.148Z" }, + { url = "https://files.pythonhosted.org/packages/93/1f/5d6dbf551766308f6f50f8baf8e9860be6182911e8106da7a7f73785f4c4/websockets-15.0.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8dd8327c795b3e3f219760fa603dcae1dcc148172290a8ab15158cf85a953413", size = 182252, upload-time = 
"2025-03-05T20:02:05.29Z" }, + { url = "https://files.pythonhosted.org/packages/d4/78/2d4fed9123e6620cbf1706c0de8a1632e1a28e7774d94346d7de1bba2ca3/websockets-15.0.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8fdc51055e6ff4adeb88d58a11042ec9a5eae317a0a53d12c062c8a8865909e8", size = 182521, upload-time = "2025-03-05T20:02:07.458Z" }, + { url = "https://files.pythonhosted.org/packages/e7/3b/66d4c1b444dd1a9823c4a81f50231b921bab54eee2f69e70319b4e21f1ca/websockets-15.0.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:693f0192126df6c2327cce3baa7c06f2a117575e32ab2308f7f8216c29d9e2e3", size = 181958, upload-time = "2025-03-05T20:02:09.842Z" }, + { url = "https://files.pythonhosted.org/packages/08/ff/e9eed2ee5fed6f76fdd6032ca5cd38c57ca9661430bb3d5fb2872dc8703c/websockets-15.0.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:54479983bd5fb469c38f2f5c7e3a24f9a4e70594cd68cd1fa6b9340dadaff7cf", size = 181918, upload-time = "2025-03-05T20:02:11.968Z" }, + { url = "https://files.pythonhosted.org/packages/d8/75/994634a49b7e12532be6a42103597b71098fd25900f7437d6055ed39930a/websockets-15.0.1-cp311-cp311-win32.whl", hash = "sha256:16b6c1b3e57799b9d38427dda63edcbe4926352c47cf88588c0be4ace18dac85", size = 176388, upload-time = "2025-03-05T20:02:13.32Z" }, + { url = "https://files.pythonhosted.org/packages/98/93/e36c73f78400a65f5e236cd376713c34182e6663f6889cd45a4a04d8f203/websockets-15.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:27ccee0071a0e75d22cb35849b1db43f2ecd3e161041ac1ee9d2352ddf72f065", size = 176828, upload-time = "2025-03-05T20:02:14.585Z" }, + { url = "https://files.pythonhosted.org/packages/51/6b/4545a0d843594f5d0771e86463606a3988b5a09ca5123136f8a76580dd63/websockets-15.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3e90baa811a5d73f3ca0bcbf32064d663ed81318ab225ee4f427ad4e26e5aff3", size = 175437, upload-time = "2025-03-05T20:02:16.706Z" }, + { url = 
"https://files.pythonhosted.org/packages/f4/71/809a0f5f6a06522af902e0f2ea2757f71ead94610010cf570ab5c98e99ed/websockets-15.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:592f1a9fe869c778694f0aa806ba0374e97648ab57936f092fd9d87f8bc03665", size = 173096, upload-time = "2025-03-05T20:02:18.832Z" }, + { url = "https://files.pythonhosted.org/packages/3d/69/1a681dd6f02180916f116894181eab8b2e25b31e484c5d0eae637ec01f7c/websockets-15.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0701bc3cfcb9164d04a14b149fd74be7347a530ad3bbf15ab2c678a2cd3dd9a2", size = 173332, upload-time = "2025-03-05T20:02:20.187Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/0073b3952f5bce97eafbb35757f8d0d54812b6174ed8dd952aa08429bcc3/websockets-15.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e8b56bdcdb4505c8078cb6c7157d9811a85790f2f2b3632c7d1462ab5783d215", size = 183152, upload-time = "2025-03-05T20:02:22.286Z" }, + { url = "https://files.pythonhosted.org/packages/74/45/c205c8480eafd114b428284840da0b1be9ffd0e4f87338dc95dc6ff961a1/websockets-15.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0af68c55afbd5f07986df82831c7bff04846928ea8d1fd7f30052638788bc9b5", size = 182096, upload-time = "2025-03-05T20:02:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/14/8f/aa61f528fba38578ec553c145857a181384c72b98156f858ca5c8e82d9d3/websockets-15.0.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64dee438fed052b52e4f98f76c5790513235efaa1ef7f3f2192c392cd7c91b65", size = 182523, upload-time = "2025-03-05T20:02:25.669Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6d/0267396610add5bc0d0d3e77f546d4cd287200804fe02323797de77dbce9/websockets-15.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d5f6b181bb38171a8ad1d6aa58a67a6aa9d4b38d0f8c5f496b9e42561dfc62fe", size = 182790, upload-time = 
"2025-03-05T20:02:26.99Z" }, + { url = "https://files.pythonhosted.org/packages/02/05/c68c5adbf679cf610ae2f74a9b871ae84564462955d991178f95a1ddb7dd/websockets-15.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5d54b09eba2bada6011aea5375542a157637b91029687eb4fdb2dab11059c1b4", size = 182165, upload-time = "2025-03-05T20:02:30.291Z" }, + { url = "https://files.pythonhosted.org/packages/29/93/bb672df7b2f5faac89761cb5fa34f5cec45a4026c383a4b5761c6cea5c16/websockets-15.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3be571a8b5afed347da347bfcf27ba12b069d9d7f42cb8c7028b5e98bbb12597", size = 182160, upload-time = "2025-03-05T20:02:31.634Z" }, + { url = "https://files.pythonhosted.org/packages/ff/83/de1f7709376dc3ca9b7eeb4b9a07b4526b14876b6d372a4dc62312bebee0/websockets-15.0.1-cp312-cp312-win32.whl", hash = "sha256:c338ffa0520bdb12fbc527265235639fb76e7bc7faafbb93f6ba80d9c06578a9", size = 176395, upload-time = "2025-03-05T20:02:33.017Z" }, + { url = "https://files.pythonhosted.org/packages/7d/71/abf2ebc3bbfa40f391ce1428c7168fb20582d0ff57019b69ea20fa698043/websockets-15.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcd5cf9e305d7b8338754470cf69cf81f420459dbae8a3b40cee57417f4614a7", size = 176841, upload-time = "2025-03-05T20:02:34.498Z" }, + { url = "https://files.pythonhosted.org/packages/cb/9f/51f0cf64471a9d2b4d0fc6c534f323b664e7095640c34562f5182e5a7195/websockets-15.0.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee443ef070bb3b6ed74514f5efaa37a252af57c90eb33b956d35c8e9c10a1931", size = 175440, upload-time = "2025-03-05T20:02:36.695Z" }, + { url = "https://files.pythonhosted.org/packages/8a/05/aa116ec9943c718905997412c5989f7ed671bc0188ee2ba89520e8765d7b/websockets-15.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5a939de6b7b4e18ca683218320fc67ea886038265fd1ed30173f5ce3f8e85675", size = 173098, upload-time = "2025-03-05T20:02:37.985Z" }, + { url = 
"https://files.pythonhosted.org/packages/ff/0b/33cef55ff24f2d92924923c99926dcce78e7bd922d649467f0eda8368923/websockets-15.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:746ee8dba912cd6fc889a8147168991d50ed70447bf18bcda7039f7d2e3d9151", size = 173329, upload-time = "2025-03-05T20:02:39.298Z" }, + { url = "https://files.pythonhosted.org/packages/31/1d/063b25dcc01faa8fada1469bdf769de3768b7044eac9d41f734fd7b6ad6d/websockets-15.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:595b6c3969023ecf9041b2936ac3827e4623bfa3ccf007575f04c5a6aa318c22", size = 183111, upload-time = "2025-03-05T20:02:40.595Z" }, + { url = "https://files.pythonhosted.org/packages/93/53/9a87ee494a51bf63e4ec9241c1ccc4f7c2f45fff85d5bde2ff74fcb68b9e/websockets-15.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3c714d2fc58b5ca3e285461a4cc0c9a66bd0e24c5da9911e30158286c9b5be7f", size = 182054, upload-time = "2025-03-05T20:02:41.926Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b2/83a6ddf56cdcbad4e3d841fcc55d6ba7d19aeb89c50f24dd7e859ec0805f/websockets-15.0.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f3c1e2ab208db911594ae5b4f79addeb3501604a165019dd221c0bdcabe4db8", size = 182496, upload-time = "2025-03-05T20:02:43.304Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/e7038944ed0abf34c45aa4635ba28136f06052e08fc2168520bb8b25149f/websockets-15.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:229cf1d3ca6c1804400b0a9790dc66528e08a6a1feec0d5040e8b9eb14422375", size = 182829, upload-time = "2025-03-05T20:02:48.812Z" }, + { url = "https://files.pythonhosted.org/packages/e0/17/de15b6158680c7623c6ef0db361da965ab25d813ae54fcfeae2e5b9ef910/websockets-15.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:756c56e867a90fb00177d530dca4b097dd753cde348448a1012ed6c5131f8b7d", size = 182217, upload-time = 
"2025-03-05T20:02:50.14Z" }, + { url = "https://files.pythonhosted.org/packages/33/2b/1f168cb6041853eef0362fb9554c3824367c5560cbdaad89ac40f8c2edfc/websockets-15.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:558d023b3df0bffe50a04e710bc87742de35060580a293c2a984299ed83bc4e4", size = 182195, upload-time = "2025-03-05T20:02:51.561Z" }, + { url = "https://files.pythonhosted.org/packages/86/eb/20b6cdf273913d0ad05a6a14aed4b9a85591c18a987a3d47f20fa13dcc47/websockets-15.0.1-cp313-cp313-win32.whl", hash = "sha256:ba9e56e8ceeeedb2e080147ba85ffcd5cd0711b89576b83784d8605a7df455fa", size = 176393, upload-time = "2025-03-05T20:02:53.814Z" }, + { url = "https://files.pythonhosted.org/packages/1b/6c/c65773d6cab416a64d191d6ee8a8b1c68a09970ea6909d16965d26bfed1e/websockets-15.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:e09473f095a819042ecb2ab9465aee615bd9c2028e4ef7d933600a8401c79561", size = 176837, upload-time = "2025-03-05T20:02:55.237Z" }, + { url = "https://files.pythonhosted.org/packages/02/9e/d40f779fa16f74d3468357197af8d6ad07e7c5a27ea1ca74ceb38986f77a/websockets-15.0.1-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:0c9e74d766f2818bb95f84c25be4dea09841ac0f734d1966f415e4edfc4ef1c3", size = 173109, upload-time = "2025-03-05T20:03:17.769Z" }, + { url = "https://files.pythonhosted.org/packages/bc/cd/5b887b8585a593073fd92f7c23ecd3985cd2c3175025a91b0d69b0551372/websockets-15.0.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:1009ee0c7739c08a0cd59de430d6de452a55e42d6b522de7aa15e6f67db0b8e1", size = 173343, upload-time = "2025-03-05T20:03:19.094Z" }, + { url = "https://files.pythonhosted.org/packages/fe/ae/d34f7556890341e900a95acf4886833646306269f899d58ad62f588bf410/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d1f20b1c7a2fa82367e04982e708723ba0e7b8d43aa643d3dcd404d74f1475", size = 174599, upload-time = "2025-03-05T20:03:21.1Z" }, + { url = 
"https://files.pythonhosted.org/packages/71/e6/5fd43993a87db364ec60fc1d608273a1a465c0caba69176dd160e197ce42/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f29d80eb9a9263b8d109135351caf568cc3f80b9928bccde535c235de55c22d9", size = 174207, upload-time = "2025-03-05T20:03:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/2b/fb/c492d6daa5ec067c2988ac80c61359ace5c4c674c532985ac5a123436cec/websockets-15.0.1-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b359ed09954d7c18bbc1680f380c7301f92c60bf924171629c5db97febb12f04", size = 174155, upload-time = "2025-03-05T20:03:25.321Z" }, + { url = "https://files.pythonhosted.org/packages/68/a1/dcb68430b1d00b698ae7a7e0194433bce4f07ded185f0ee5fb21e2a2e91e/websockets-15.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:cad21560da69f4ce7658ca2cb83138fb4cf695a2ba3e475e0559e05991aa8122", size = 176884, upload-time = "2025-03-05T20:03:27.934Z" }, + { url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743, upload-time = "2025-03-05T20:03:39.41Z" }, +] + [[package]] name = "yarl" version = "1.20.1" From 19895227c62fc3b15cd199641457e5fd025eb8bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nathana=C3=ABl=20HANNEBERT?= Date: Tue, 10 Mar 2026 18:12:47 +0100 Subject: [PATCH 2/4] fix(openai): use REST transcription to support non-realtime backends The openai plugin's stream() always connects via WebSocket to the /realtime endpoint, which is not implemented by all OpanAI compatible backends. Switch to recognize() which uses the standard REST/audio/transcriptions endpoint instead. 
Audio is segmented into speech utterances using energy-based silence detection (RMS threshold) before each recognize() call. Also fixes a NoneType crash in the Redis message handler that occurred when a message arrived before the agent had connected to the room. --- main.py | 2 +- openai_stt_agent.py | 111 +++++++++++++++++++------- tests/test_openai_stt_agent.py | 139 +++++++++++++++++---------------- 3 files changed, 153 insertions(+), 99 deletions(-) diff --git a/main.py b/main.py index b76b4f1..6683a5d 100644 --- a/main.py +++ b/main.py @@ -67,7 +67,7 @@ async def on_redis_message(message_data: str): meeting_id = routing.get("meetingId") user_id = routing.get("userId") - if meeting_id != agent.room.name: + if agent.room is None or meeting_id != agent.room.name: return if event_name == RedisManager.USER_SPEECH_LOCALE_CHANGED_EVT_MSG: diff --git a/openai_stt_agent.py b/openai_stt_agent.py index ea4941d..55169fb 100644 --- a/openai_stt_agent.py +++ b/openai_stt_agent.py @@ -2,6 +2,7 @@ import logging import time +import numpy as np from livekit import rtc from livekit.agents import ( AutoSubscribe, @@ -13,6 +14,14 @@ from config import OpenAiConfig from events import EventEmitter +# Energy-based voice activity detection parameters. +# RMS threshold (int16 scale 0–32768): frames below this are considered silence. +_SILENCE_THRESHOLD_RMS = 500 +# Seconds of continuous silence after speech before the segment is flushed. +_SILENCE_DURATION_S = 0.8 +# Maximum segment duration before a forced flush (prevents unbounded buffering). 
+_MAX_BUFFER_DURATION_S = 30.0 + class OpenAiSttAgent(EventEmitter): def __init__(self, config: OpenAiConfig): @@ -74,15 +83,11 @@ def start_transcription_for_user(self, user_id: str, locale: str, provider: str) ) return - openai_locale = self._sanitize_locale(locale) - stt_stream = self.stt.stream(language=openai_locale) + language = self._sanitize_locale(locale) task = asyncio.create_task( - self._run_transcription_pipeline(participant, track, stt_stream) + self._run_transcription_pipeline(participant, track, language) ) - self.processing_info[participant.identity] = { - "stream": stt_stream, - "task": task, - } + self.processing_info[participant.identity] = {"task": task} logging.info( f"Started transcription for {participant.identity} with locale {locale}." ) @@ -104,7 +109,7 @@ def update_locale_for_user(self, user_id: str, locale: str): provider = self.participant_settings.get(user_id, {}).get( "provider", "openai" ) - # OpenAI STT does not support live stream.update_options(); restart the pipeline. + # Restart the pipeline with the new locale. self.stop_transcription_for_user(user_id) self.start_transcription_for_user(user_id, locale, provider) else: @@ -179,40 +184,88 @@ async def _run_transcription_pipeline( self, participant: rtc.RemoteParticipant, track: rtc.Track, - stt_stream: stt.SpeechStream, + language: str, ): + """Collect audio, segment by silence, and transcribe via REST API. + + The OpenAI STT plugin's stream() method requires the Realtime WebSocket + API which not all backends support. Using recognize() hits the standard + REST /audio/transcriptions endpoint and works with any Whisper-compatible + backend. + + TODO: Support the /realtime WebSocket endpoint as an opt-in mode (e.g. + via an OpenAiConfig flag like `use_realtime: bool`). When enabled, + delegate to openai_plugin.STT.stream() directly instead of this + energy-based segmentation loop. 
This would unlock interim results and + lower latency for backends that implement the OpenAI Realtime API + (e.g. gpt-4o-transcribe). + """ audio_stream = rtc.AudioStream(track) self.open_time = time.time() - async def forward_audio_task(): + speech_buffer: list[rtc.AudioFrame] = [] + buffer_duration = 0.0 + silence_duration = 0.0 + was_speaking = False + + async def flush_segment(frames: list[rtc.AudioFrame]) -> None: + if not frames: + return try: - async for audio_event in audio_stream: - stt_stream.push_frame(audio_event.frame) - finally: - stt_stream.flush() - - async def process_stt_task(): - async for event in stt_stream: - if event.type == stt.SpeechEventType.FINAL_TRANSCRIPT: + event = await self.stt.recognize(buffer=frames, language=language) + if event.alternatives and event.alternatives[0].text: self.emit( "final_transcript", participant=participant, event=event, open_time=self.open_time, ) - elif ( - event.type == stt.SpeechEventType.INTERIM_TRANSCRIPT - and self.config.interim_results - ): - self.emit( - "interim_transcript", - participant=participant, - event=event, - open_time=self.open_time, - ) + except asyncio.CancelledError: + raise + except Exception as e: + logging.error( + f"Error transcribing segment for {participant.identity}: {e}" + ) try: - await asyncio.gather(forward_audio_task(), process_stt_task()) + async for audio_event in audio_stream: + frame = audio_event.frame + samples = np.frombuffer(frame.data, dtype=np.int16) + rms = float(np.sqrt(np.mean(samples.astype(np.float32) ** 2))) + is_speaking = rms > _SILENCE_THRESHOLD_RMS + frame_duration = frame.samples_per_channel / frame.sample_rate + + if is_speaking: + speech_buffer.append(frame) + buffer_duration += frame_duration + silence_duration = 0.0 + was_speaking = True + elif was_speaking: + # Carry silence frames so the segment has natural trailing audio. 
+ speech_buffer.append(frame) + buffer_duration += frame_duration + silence_duration += frame_duration + + if ( + silence_duration >= _SILENCE_DURATION_S + or buffer_duration >= _MAX_BUFFER_DURATION_S + ): + await flush_segment(speech_buffer[:]) + speech_buffer.clear() + buffer_duration = 0.0 + silence_duration = 0.0 + was_speaking = False + elif buffer_duration >= _MAX_BUFFER_DURATION_S: + # Safety flush even without trailing silence. + await flush_segment(speech_buffer[:]) + speech_buffer.clear() + buffer_duration = 0.0 + silence_duration = 0.0 + was_speaking = False + + # Flush any remaining buffered speech at end of stream. + await flush_segment(speech_buffer[:]) + except asyncio.CancelledError: logging.info(f"Transcription for {participant.identity} was cancelled.") except Exception as e: diff --git a/tests/test_openai_stt_agent.py b/tests/test_openai_stt_agent.py index 44bf028..d9baed5 100644 --- a/tests/test_openai_stt_agent.py +++ b/tests/test_openai_stt_agent.py @@ -3,11 +3,12 @@ import logging from unittest.mock import AsyncMock, MagicMock, patch +import numpy as np from livekit import rtc from livekit.agents import stt from config import OpenAiConfig -from openai_stt_agent import OpenAiSttAgent +from openai_stt_agent import OpenAiSttAgent, _SILENCE_THRESHOLD_RMS def _make_agent(interim_results=None, **kwargs): @@ -50,6 +51,21 @@ def _make_participant(identity, audio_track=None): return participant +def _make_audio_event(amplitude: int = 0) -> MagicMock: + """Create a mock audio event with PCM bytes at the given amplitude.""" + samples = np.full(160, amplitude, dtype=np.int16) + event = MagicMock() + event.frame.data = samples.tobytes() + event.frame.sample_rate = 16000 + event.frame.samples_per_channel = 160 + return event + + +def _make_loud_event() -> MagicMock: + """Audio event with RMS energy above the speech threshold.""" + return _make_audio_event(amplitude=int(_SILENCE_THRESHOLD_RMS * 2)) + + class TestSanitizeLocale: def 
test_strips_region_from_bcp47_locale(self): agent = _make_agent() @@ -73,7 +89,7 @@ class TestStopTranscriptionForUser: def test_cancels_task_and_removes_from_processing_info(self): agent = _make_agent() mock_task = MagicMock() - agent.processing_info["user_123"] = {"task": mock_task, "stream": MagicMock()} + agent.processing_info["user_123"] = {"task": mock_task} agent.stop_transcription_for_user("user_123") @@ -103,7 +119,7 @@ def test_restarts_transcription_when_active(self): agent = _make_agent_with_room(participants={"pid": participant}) agent.participant_settings["user_1"] = {"locale": "en", "provider": "openai"} mock_task = MagicMock() - agent.processing_info["user_1"] = {"stream": MagicMock(), "task": mock_task} + agent.processing_info["user_1"] = {"task": mock_task} with ( patch.object(agent, "stop_transcription_for_user") as mock_stop, @@ -140,7 +156,7 @@ def test_stops_transcription_and_clears_settings(self): agent = _make_agent() agent.participant_settings["user_1"] = {"locale": "en", "provider": "openai"} mock_task = MagicMock() - agent.processing_info["user_1"] = {"task": mock_task, "stream": MagicMock()} + agent.processing_info["user_1"] = {"task": mock_task} mock_participant = MagicMock() mock_participant.identity = "user_1" @@ -221,14 +237,15 @@ def test_skips_when_already_processing(self, caplog): agent = _make_agent_with_room(participants={"pid": participant}) existing_task = MagicMock() - agent.processing_info["user_1"] = {"task": existing_task, "stream": MagicMock()} + agent.processing_info["user_1"] = {"task": existing_task} with caplog.at_level(logging.DEBUG): agent.start_transcription_for_user("user_1", "en-US", "openai") assert agent.processing_info["user_1"]["task"] is existing_task - async def test_creates_stream_and_task_on_success(self): + async def test_creates_task_on_success(self): + """Happy path: participant with audio track → task created.""" mock_track = MagicMock() mock_track.kind = rtc.TrackKind.KIND_AUDIO participant = 
_make_participant("user_1", audio_track=mock_track) @@ -239,7 +256,6 @@ async def test_creates_stream_and_task_on_success(self): assert "user_1" in agent.processing_info info = agent.processing_info["user_1"] - assert "stream" in info assert "task" in info assert agent.participant_settings["user_1"]["locale"] == "en-US" assert agent.participant_settings["user_1"]["provider"] == "openai" @@ -248,24 +264,28 @@ async def test_creates_stream_and_task_on_success(self): with contextlib.suppress(asyncio.CancelledError): await info["task"] - async def test_sanitizes_locale_before_creating_stream(self): - """Locale 'pt-BR' should be sanitized to 'pt' for OpenAI STT.""" + async def test_passes_sanitized_locale_to_pipeline(self): + """Locale 'pt-BR' should be sanitized to 'pt' when starting the pipeline.""" mock_track = MagicMock() mock_track.kind = rtc.TrackKind.KIND_AUDIO participant = _make_participant("user_1", audio_track=mock_track) agent = _make_agent_with_room(participants={"pid": participant}) - with patch("openai_stt_agent.rtc.AudioStream"): + with patch.object( + agent, "_run_transcription_pipeline", new_callable=AsyncMock + ) as mock_pipeline: agent.start_transcription_for_user("user_1", "pt-BR", "openai") - agent.stt.stream.assert_called_once_with(language="pt") + await asyncio.sleep(0) # let the task start - agent.processing_info["user_1"]["task"].cancel() - with contextlib.suppress(asyncio.CancelledError): - await agent.processing_info["user_1"]["task"] + mock_pipeline.assert_called_once_with(participant, mock_track, "pt") + + # Clean up the task + agent.processing_info.pop("user_1", None) class TestRunTranscriptionPipeline: async def test_cancellation_cleans_up_processing_info(self): + """CancelledError should be caught and processing_info entry removed.""" agent = _make_agent() mock_participant = MagicMock(spec=rtc.RemoteParticipant) mock_participant.identity = "user_1" @@ -274,99 +294,87 @@ async def test_cancellation_cleans_up_processing_info(self): 
mock_audio_stream = AsyncMock() mock_audio_stream.__aiter__.side_effect = asyncio.CancelledError - mock_stt_stream = AsyncMock() - mock_stt_stream.__aiter__.return_value = iter([]) - - agent.processing_info["user_1"] = { - "stream": mock_stt_stream, - "task": MagicMock(), - } + agent.processing_info["user_1"] = {"task": MagicMock()} with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): - await agent._run_transcription_pipeline( - mock_participant, mock_track, mock_stt_stream - ) + await agent._run_transcription_pipeline(mock_participant, mock_track, "en") assert "user_1" not in agent.processing_info async def test_emits_final_transcript_event(self): + """Speech frames trigger a final_transcript event via recognize().""" agent = _make_agent() mock_participant = MagicMock(spec=rtc.RemoteParticipant) mock_participant.identity = "user_1" mock_track = MagicMock() + # One loud audio frame followed by end-of-stream → triggers end-of-stream flush + loud_event = _make_loud_event() mock_audio_stream = AsyncMock() - mock_audio_stream.__aiter__.return_value = iter([]) + mock_audio_stream.__aiter__.return_value = iter([loud_event]) - mock_event = MagicMock() - mock_event.type = stt.SpeechEventType.FINAL_TRANSCRIPT - mock_stt_stream = AsyncMock() - mock_stt_stream.__aiter__.return_value = iter([mock_event]) + mock_stt_event = MagicMock() + mock_stt_event.alternatives = [MagicMock(text="hello world")] + agent.stt.recognize = AsyncMock(return_value=mock_stt_event) emitted = [] agent.on("final_transcript", lambda **kw: emitted.append(kw)) with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): - await agent._run_transcription_pipeline( - mock_participant, mock_track, mock_stt_stream - ) + await agent._run_transcription_pipeline(mock_participant, mock_track, "en") await asyncio.sleep(0) assert len(emitted) == 1 assert emitted[0]["participant"] is mock_participant - assert emitted[0]["event"] is mock_event + assert emitted[0]["event"] 
is mock_stt_event - async def test_emits_interim_transcript_when_enabled(self): - agent = _make_agent(interim_results=True) + async def test_does_not_emit_when_recognize_returns_empty_text(self): + """No event should be emitted if the transcription result is empty.""" + agent = _make_agent() mock_participant = MagicMock(spec=rtc.RemoteParticipant) mock_participant.identity = "user_1" mock_track = MagicMock() + loud_event = _make_loud_event() mock_audio_stream = AsyncMock() - mock_audio_stream.__aiter__.return_value = iter([]) + mock_audio_stream.__aiter__.return_value = iter([loud_event]) - mock_event = MagicMock() - mock_event.type = stt.SpeechEventType.INTERIM_TRANSCRIPT - mock_stt_stream = AsyncMock() - mock_stt_stream.__aiter__.return_value = iter([mock_event]) + mock_stt_event = MagicMock() + mock_stt_event.alternatives = [MagicMock(text="")] + agent.stt.recognize = AsyncMock(return_value=mock_stt_event) emitted = [] - agent.on("interim_transcript", lambda **kw: emitted.append(kw)) + agent.on("final_transcript", lambda **kw: emitted.append(kw)) with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): - await agent._run_transcription_pipeline( - mock_participant, mock_track, mock_stt_stream - ) - await asyncio.sleep(0) + await agent._run_transcription_pipeline(mock_participant, mock_track, "en") - assert len(emitted) == 1 + assert len(emitted) == 0 - async def test_suppresses_interim_transcript_when_disabled(self): - agent = _make_agent(interim_results=False) + async def test_does_not_emit_for_silent_audio(self): + """Silent frames (below energy threshold) should not trigger recognition.""" + agent = _make_agent() mock_participant = MagicMock(spec=rtc.RemoteParticipant) mock_participant.identity = "user_1" mock_track = MagicMock() + silent_event = _make_audio_event(amplitude=0) mock_audio_stream = AsyncMock() - mock_audio_stream.__aiter__.return_value = iter([]) + mock_audio_stream.__aiter__.return_value = iter([silent_event]) - 
mock_event = MagicMock() - mock_event.type = stt.SpeechEventType.INTERIM_TRANSCRIPT - mock_stt_stream = AsyncMock() - mock_stt_stream.__aiter__.return_value = iter([mock_event]) + agent.stt.recognize = AsyncMock() emitted = [] - agent.on("interim_transcript", lambda **kw: emitted.append(kw)) + agent.on("final_transcript", lambda **kw: emitted.append(kw)) with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): - await agent._run_transcription_pipeline( - mock_participant, mock_track, mock_stt_stream - ) - await asyncio.sleep(0) + await agent._run_transcription_pipeline(mock_participant, mock_track, "en") + agent.stt.recognize.assert_not_called() assert len(emitted) == 0 async def test_generic_exception_cleans_up_processing_info(self): + """Unexpected exceptions should be caught and processing_info cleaned up.""" agent = _make_agent() mock_participant = MagicMock(spec=rtc.RemoteParticipant) mock_participant.identity = "user_1" @@ -375,29 +383,22 @@ async def test_generic_exception_cleans_up_processing_info(self): mock_audio_stream = AsyncMock() mock_audio_stream.__aiter__.side_effect = RuntimeError("boom") - mock_stt_stream = AsyncMock() - mock_stt_stream.__aiter__.return_value = iter([]) - - agent.processing_info["user_1"] = { - "stream": mock_stt_stream, - "task": MagicMock(), - } + agent.processing_info["user_1"] = {"task": MagicMock()} with patch("openai_stt_agent.rtc.AudioStream", return_value=mock_audio_stream): - await agent._run_transcription_pipeline( - mock_participant, mock_track, mock_stt_stream - ) + await agent._run_transcription_pipeline(mock_participant, mock_track, "en") assert "user_1" not in agent.processing_info class TestCleanup: async def test_cleanup_stops_all_active_transcriptions(self): + """_cleanup() should cancel all active tasks.""" agent = _make_agent() tasks = {} for uid in ("user_1", "user_2", "user_3"): mock_task = MagicMock() - agent.processing_info[uid] = {"task": mock_task, "stream": MagicMock()} + 
agent.processing_info[uid] = {"task": mock_task} tasks[uid] = mock_task await agent._cleanup() From 31e055e8f3ef8d8f33d15c86870da5e93d6050f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nathana=C3=ABl=20HANNEBERT?= Date: Tue, 10 Mar 2026 18:28:43 +0100 Subject: [PATCH 3/4] fix(openai): bypass OpenAI SDK to fix 405 on custom base_url The livekit OpenAI plugin's recognize() uses the OpenAI Python SDK which constructs the URL as {base_url}/audio/transcriptions (no /v1/), causing 405 Method Not Allowed on backends like my-selfhosted-openwebui.com/api/. Replace with a direct aiohttp POST to {base_url}/v1/audio/transcriptions, matching the approach used in bbb-livekit-transcriber. Also manage the aiohttp session lifecycle within the agent. --- openai_stt_agent.py | 50 +++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/openai_stt_agent.py b/openai_stt_agent.py index 55169fb..d20b12e 100644 --- a/openai_stt_agent.py +++ b/openai_stt_agent.py @@ -2,6 +2,7 @@ import logging import time +import aiohttp import numpy as np from livekit import rtc from livekit.agents import ( @@ -9,7 +10,6 @@ JobContext, stt, ) -from livekit.plugins import openai as openai_plugin from config import OpenAiConfig from events import EventEmitter @@ -27,13 +27,46 @@ class OpenAiSttAgent(EventEmitter): def __init__(self, config: OpenAiConfig): super().__init__() self.config = config - self.stt = openai_plugin.STT(**config.to_dict()) self.ctx: JobContext | None = None self.room: rtc.Room | None = None self.processing_info = {} self.participant_settings = {} self.open_time = time.time() self._shutdown = asyncio.Event() + self._http_session: aiohttp.ClientSession | None = None + + def _get_http_session(self) -> aiohttp.ClientSession: + if self._http_session is None: + self._http_session = aiohttp.ClientSession( + timeout=aiohttp.ClientTimeout(total=30) + ) + return self._http_session + + async def _transcribe_wav(self, wav_bytes: bytes, language: str) 
-> str: + """Call the OpenAI-compatible REST endpoint directly. + + Constructs the URL as ``{base_url}/v1/audio/transcriptions`` so that + custom backends (e.g. my-selfhosted-openwebui.com/api/) work correctly regardless of + whether the OpenAI SDK would strip or double the ``/v1`` path segment. + """ + base_url = (self.config.base_url or "https://api.openai.com").rstrip("/") + url = f"{base_url}/v1/audio/transcriptions" + + form = aiohttp.FormData() + form.add_field( + "file", wav_bytes, filename="audio.wav", content_type="audio/wav" + ) + form.add_field("model", self.config.model) + form.add_field("response_format", "json") + if language: + form.add_field("language", language) + + headers = {"Authorization": f"Bearer {self.config.api_key}"} + session = self._get_http_session() + async with session.post(url, data=form, headers=headers) as resp: + resp.raise_for_status() + result = await resp.json() + return result.get("text", "").strip() async def start(self, ctx: JobContext): self.ctx = ctx @@ -56,6 +89,10 @@ async def _cleanup(self): await asyncio.sleep(0.1) + if self._http_session: + await self._http_session.close() + self._http_session = None + def start_transcription_for_user(self, user_id: str, locale: str, provider: str): settings = self.participant_settings.setdefault(user_id, {}) settings["locale"] = locale @@ -212,8 +249,13 @@ async def flush_segment(frames: list[rtc.AudioFrame]) -> None: if not frames: return try: - event = await self.stt.recognize(buffer=frames, language=language) - if event.alternatives and event.alternatives[0].text: + wav_bytes = rtc.combine_audio_frames(frames).to_wav_bytes() + text = await self._transcribe_wav(wav_bytes, language) + if text: + event = stt.SpeechEvent( + type=stt.SpeechEventType.FINAL_TRANSCRIPT, + alternatives=[stt.SpeechData(text=text, language=language)], + ) self.emit( "final_transcript", participant=participant, From 2a19c9194bb4b6b3e26ed15c682e678edff81cc4 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Nathana=C3=ABl=20HANNEBERT?= Date: Wed, 11 Mar 2026 11:24:28 +0100 Subject: [PATCH 4/4] fix(openai): track segment start/end times to fix empty caption recordings Each speech segment was missing start_time/end_time on SpeechData, causing all transcripts to share the same transcriptId (open_time + 0.0). BBB's AudioCaptions model treated every utterance after the first as a same-ID update, returning empty text, which resulted in an empty VTT file in recordings even though live captions worked correctly. --- openai_stt_agent.py | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/openai_stt_agent.py b/openai_stt_agent.py index d20b12e..ee531f0 100644 --- a/openai_stt_agent.py +++ b/openai_stt_agent.py @@ -244,17 +244,24 @@ async def _run_transcription_pipeline( buffer_duration = 0.0 silence_duration = 0.0 was_speaking = False + speech_start_time = 0.0 - async def flush_segment(frames: list[rtc.AudioFrame]) -> None: + async def flush_segment(frames: list[rtc.AudioFrame], seg_start: float) -> None: if not frames: return try: wav_bytes = rtc.combine_audio_frames(frames).to_wav_bytes() text = await self._transcribe_wav(wav_bytes, language) if text: + seg_end = time.time() - self.open_time event = stt.SpeechEvent( type=stt.SpeechEventType.FINAL_TRANSCRIPT, - alternatives=[stt.SpeechData(text=text, language=language)], + alternatives=[stt.SpeechData( + text=text, + language=language, + start_time=seg_start, + end_time=seg_end, + )], ) self.emit( "final_transcript", @@ -278,6 +285,8 @@ async def flush_segment(frames: list[rtc.AudioFrame]) -> None: frame_duration = frame.samples_per_channel / frame.sample_rate if is_speaking: + if not was_speaking: + speech_start_time = time.time() - self.open_time speech_buffer.append(frame) buffer_duration += frame_duration silence_duration = 0.0 @@ -292,21 +301,21 @@ async def flush_segment(frames: list[rtc.AudioFrame]) -> None: silence_duration >= _SILENCE_DURATION_S or buffer_duration >= 
_MAX_BUFFER_DURATION_S ): - await flush_segment(speech_buffer[:]) + await flush_segment(speech_buffer[:], speech_start_time) speech_buffer.clear() buffer_duration = 0.0 silence_duration = 0.0 was_speaking = False elif buffer_duration >= _MAX_BUFFER_DURATION_S: # Safety flush even without trailing silence. - await flush_segment(speech_buffer[:]) + await flush_segment(speech_buffer[:], speech_start_time) speech_buffer.clear() buffer_duration = 0.0 silence_duration = 0.0 was_speaking = False # Flush any remaining buffered speech at end of stream. - await flush_segment(speech_buffer[:]) + await flush_segment(speech_buffer[:], speech_start_time) except asyncio.CancelledError: logging.info(f"Transcription for {participant.identity} was cancelled.")