diff --git a/mellea/backends/litellm.py b/mellea/backends/litellm.py
new file mode 100644
index 0000000..0cc9fb4
--- /dev/null
+++ b/mellea/backends/litellm.py
@@ -0,0 +1,426 @@
+"""A generic LiteLLM backend that wraps around the litellm python sdk."""
+
+import datetime
+import json
+from collections.abc import Callable
+from typing import Any
+
+import litellm
+import litellm.litellm_core_utils
+import litellm.litellm_core_utils.get_supported_openai_params
+
+import mellea.backends.model_ids as model_ids
+from mellea.backends import BaseModelSubclass
+from mellea.backends.formatter import Formatter, FormatterBackend, TemplateFormatter
+from mellea.backends.tools import convert_tools_to_json, get_tools_from_action
+from mellea.backends.types import ModelOption
+from mellea.helpers.fancy_logger import FancyLogger
+from mellea.stdlib.base import (
+    CBlock,
+    Component,
+    Context,
+    GenerateLog,
+    ModelOutputThunk,
+    ModelToolCall,
+    TemplateRepresentation,
+)
+from mellea.stdlib.chat import Message
+from mellea.stdlib.requirement import ALoraRequirement, LLMaJRequirement, Requirement
+
+
+class LiteLLMBackend(FormatterBackend):
+    """A generic LiteLLM compatible backend."""
+
+    def __init__(
+        self,
+        model_id: str = "ollama/" + str(model_ids.IBM_GRANITE_3_3_8B.ollama_name),
+        formatter: Formatter | None = None,
+        base_url: str | None = "http://localhost:11434",
+        model_options: dict | None = None,
+    ):
+        """Initialize a LiteLLM backend. Any additional kwargs that the underlying client needs should be passed as part of `model_options`.
+
+        Args:
+            model_id: The LiteLLM model identifier. Make sure that all necessary credentials are set as OS environment variables.
+            formatter: A custom formatter for this backend. If None, defaults to TemplateFormatter.
+            base_url: Base url for the LLM API. Defaults to the local Ollama endpoint (http://localhost:11434).
+            model_options: Generation options to pass to the LLM. Defaults to None.
+        """
+        super().__init__(
+            model_id=model_id,
+            formatter=(
+                formatter
+                if formatter is not None
+                else TemplateFormatter(model_id=model_id)
+            ),
+            model_options=model_options,
+        )
+
+        assert isinstance(model_id, str), "Model ID must be a string."
+        self._model_id = model_id
+
+        if base_url is None:
+            self._base_url = "http://localhost:11434/v1"  # ollama
+        else:
+            self._base_url = base_url
+
+        # A mapping of common options for this backend mapped to their Mellea ModelOptions equivalent.
+        # These are usually values that must be extracted beforehand or that are common among backend providers.
+        # OpenAI has some deprecated parameters. Those map to the same mellea parameter, but
+        # users should only specify a single one in their request.
+        self.to_mellea_model_opts_map = {
+            "system": ModelOption.SYSTEM_PROMPT,
+            "reasoning_effort": ModelOption.THINKING,  # TODO: JAL; see which of these are actually extracted...
+            "seed": ModelOption.SEED,
+            "max_completion_tokens": ModelOption.MAX_NEW_TOKENS,
+            "max_tokens": ModelOption.MAX_NEW_TOKENS,
+            "tools": ModelOption.TOOLS,
+            "functions": ModelOption.TOOLS,
+        }
+
+        # A mapping of Mellea specific ModelOptions to the specific names for this backend.
+        # These options should almost always be a subset of those specified in the `to_mellea_model_opts_map`.
+        # Usually, values that are intentionally extracted while prepping for the backend generate call
+        # will be omitted here so that they will be removed when model_options are processed
+        # for the call to the model.
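+        # Note: ModelOption.THINKING is read out explicitly in
+        # `_generate_from_chat_context_standard` and passed to litellm as
+        # `reasoning_effort`, so it is intentionally not mapped here; mapping it
+        # as well would duplicate that keyword argument.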
+        self.from_mellea_model_opts_map = {
+            ModelOption.SEED: "seed",
+            ModelOption.MAX_NEW_TOKENS: "max_completion_tokens",
+        }
+
+    def generate_from_context(
+        self,
+        action: Component | CBlock,
+        ctx: Context,
+        *,
+        format: type[BaseModelSubclass] | None = None,
+        model_options: dict | None = None,
+        generate_logs: list[GenerateLog] | None = None,
+        tool_calls: bool = False,
+    ):
+        """See `generate_from_chat_context`."""
+        assert ctx.is_chat_context, NotImplementedError(
+            "The LiteLLM backend only supports chat-like contexts."
+        )
+        return self._generate_from_chat_context_standard(
+            action,
+            ctx,
+            format=format,
+            model_options=model_options,
+            generate_logs=generate_logs,
+            tool_calls=tool_calls,
+        )
+
+    def _simplify_and_merge(
+        self, model_options: dict[str, Any] | None
+    ) -> dict[str, Any]:
+        """Simplifies model_options to use the Mellea specific ModelOption.Option keys and merges the backend's model_options with those passed into this call.
+
+        Rules:
+        - Within a model_options dict, existing keys take precedence. This means remapping to mellea specific keys will maintain the value of the mellea specific key if one already exists.
+        - When merging, the keys/values from the dictionary passed into this function take precedence.
+
+        Because this function simplifies and then merges, non-Mellea keys from the passed-in model_options will replace
+        Mellea specific keys from the backend's model_options.
+
+        Args:
+            model_options: the model_options for this call
+
+        Returns:
+            a new dict
+        """
+        backend_model_opts = ModelOption.replace_keys(
+            self.model_options, self.to_mellea_model_opts_map
+        )
+
+        if model_options is None:
+            return backend_model_opts
+
+        generate_call_model_opts = ModelOption.replace_keys(
+            model_options, self.to_mellea_model_opts_map
+        )
+        return ModelOption.merge_model_options(
+            backend_model_opts, generate_call_model_opts
+        )
+
+    def _make_backend_specific_and_remove(
+        self, model_options: dict[str, Any]
+    ) -> dict[str, Any]:
+        """Maps specified Mellea specific keys to their backend specific versions and removes any remaining Mellea keys.
+
+        Additionally, logs any params unknown to litellm and any params that are openai specific but not supported by this model/provider.
+
+        Args:
+            model_options: the model_options for this call
+
+        Returns:
+            a new dict
+        """
+        backend_specific = ModelOption.replace_keys(
+            model_options, self.from_mellea_model_opts_map
+        )
+        backend_specific = ModelOption.remove_special_keys(backend_specific)
+
+        # We set `drop_params=True`, which will drop non-supported openai params; check for non-openai
+        # params that might cause errors and log which openai params aren't supported here.
+        # See https://docs.litellm.ai/docs/completion/input.
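+        # For example: an unrecognized key such as "homer_simpson" (used in the test
+        # suite) is only warned about below and passed through, while a known openai
+        # param that this provider doesn't support is logged and then dropped by
+        # litellm because of `drop_params=True`.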
+        standard_openai_subset = litellm.get_standard_openai_params(backend_specific)
+        supported_params_list = litellm.litellm_core_utils.get_supported_openai_params.get_supported_openai_params(
+            self._model_id
+        )
+        supported_params = (
+            set(supported_params_list) if supported_params_list is not None else set()
+        )
+
+        unknown_keys = []  # keys that are unknown to litellm
+        unsupported_openai_params = []  # openai params that are known to litellm but not supported for this model/provider
+        for key in backend_specific.keys():
+            if key not in standard_openai_subset.keys():
+                unknown_keys.append(key)
+
+            elif key not in supported_params:
+                unsupported_openai_params.append(key)
+
+        if len(unknown_keys) > 0:
+            FancyLogger.get_logger().warning(
+                f"litellm allows for unknown / non-openai input params; mellea won't validate the following params that may cause issues: {', '.join(unknown_keys)}"
+            )
+
+        if len(unsupported_openai_params) > 0:
+            FancyLogger.get_logger().warning(
+                f"litellm will automatically drop the following openai keys that aren't supported by the current model/provider: {', '.join(unsupported_openai_params)}"
+            )
+
+        return backend_specific
+
+    def _generate_from_chat_context_standard(
+        self,
+        action: Component | CBlock,
+        ctx: Context,
+        *,
+        format: type[BaseModelSubclass]
+        | None = None,  # Type[BaseModelSubclass] is a class object of a subclass of BaseModel
+        model_options: dict | None = None,
+        generate_logs: list[GenerateLog] | None = None,
+        tool_calls: bool = False,
+    ) -> ModelOutputThunk:
+        model_opts = self._simplify_and_merge(model_options)
+        linearized_context = ctx.linearize()
+        assert linearized_context is not None, (
+            "Cannot generate from a non-linear context in a FormatterBackend."
+        )
+        # Convert our linearized context into a sequence of chat messages. Template formatters have a standard way of doing this.
+        messages: list[Message] = self.formatter.to_chat_messages(linearized_context)
+        # Add the final message.
+        match action:
+            case ALoraRequirement():
+                raise Exception("The LiteLLM backend does not support activated LoRAs.")
+            case _:
+                messages.extend(self.formatter.to_chat_messages([action]))
+
+        conversation: list[dict] = []
+        system_prompt = model_opts.get(ModelOption.SYSTEM_PROMPT, "")
+        if system_prompt != "":
+            conversation.append({"role": "system", "content": system_prompt})
+        conversation.extend([{"role": m.role, "content": m.content} for m in messages])
+
+        if format is not None:
+            response_format = {
+                "type": "json_schema",
+                "json_schema": {
+                    "name": format.__name__,
+                    "schema": format.model_json_schema(),
+                    "strict": True,
+                },
+            }
+        else:
+            response_format = {"type": "text"}
+
+        thinking = model_opts.get(ModelOption.THINKING, None)
+        if type(thinking) is bool and thinking:
+            # OpenAI uses strings for its reasoning levels.
+            thinking = "medium"
+
+        # Append tool call information if applicable.
+        tools = self._extract_tools(action, format, model_opts, tool_calls)
+        formatted_tools = convert_tools_to_json(tools) if len(tools) > 0 else None
+
+        chat_response: litellm.ModelResponse = litellm.completion(
+            model=self._model_id,
+            messages=conversation,
+            tools=formatted_tools,
+            response_format=response_format,
+            reasoning_effort=thinking,  # type: ignore
+            drop_params=True,  # See note in `_make_backend_specific_and_remove`.
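+            # Remaining options (for example, a user-supplied seed) are forwarded
+            # here as backend-specific kwargs once Mellea-only keys have been
+            # mapped or removed.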
+            **self._make_backend_specific_and_remove(model_opts),
+        )
+
+        choice_0 = chat_response.choices[0]
+        assert isinstance(choice_0, litellm.utils.Choices), (
+            "Only works for non-streaming response for now"
+        )
+        result = ModelOutputThunk(
+            value=choice_0.message.content,
+            meta={
+                "litellm_chat_response": chat_response.choices[0].model_dump()
+            },  # NOTE: Using model dump here to comply with `TemplateFormatter`
+            tool_calls=self._extract_model_tool_requests(tools, chat_response),
+        )
+
+        parsed_result = self.formatter.parse(source_component=action, result=result)
+
+        if generate_logs is not None:
+            assert isinstance(generate_logs, list)
+            generate_log = GenerateLog()
+            generate_log.prompt = conversation
+            generate_log.backend = f"litellm::{self.model_id!s}"
+            generate_log.model_options = model_opts
+            generate_log.date = datetime.datetime.now()
+            generate_log.model_output = chat_response
+            generate_log.extra = {
+                "format": format,
+                "tools_available": tools,
+                "tools_called": result.tool_calls,
+                "seed": model_opts.get(ModelOption.SEED, None),
+            }
+            generate_log.action = action
+            generate_log.result = parsed_result
+            generate_logs.append(generate_log)
+
+        return parsed_result
+
+    @staticmethod
+    def _extract_tools(action, format, model_opts, tool_calls):
+        tools: dict[str, Callable] = dict()
+        if tool_calls:
+            if format:
+                FancyLogger.get_logger().warning(
+                    f"Tool calling typically uses constrained generation, but you have specified a `format` in your generate call. NB: tool calling is superseded by format; we will NOT call tools for your request: {action}"
+                )
+            else:
+                if isinstance(action, Component) and isinstance(
+                    action.format_for_llm(), TemplateRepresentation
+                ):
+                    tools = get_tools_from_action(action)
+
+                model_options_tools = model_opts.get(ModelOption.TOOLS, None)
+                if model_options_tools is not None:
+                    assert isinstance(model_options_tools, dict)
+                    for fn_name in model_options_tools:
+                        # invariant re: relationship between the model_options set of tools and the TemplateRepresentation set of tools
+                        assert fn_name not in tools.keys(), (
+                            f"Cannot add tool {fn_name} because that tool was already defined in the TemplateRepresentation for the action."
+                        )
+                        # type checking because ModelOptions is an untyped dict and the calling convention for tools isn't clearly documented at our abstraction boundaries.
+                        assert type(fn_name) is str, (
+                            "When providing a `ModelOption.TOOLS` parameter to `model_options`, always use the type Dict[str, Callable] where `str` is the function name and the callable is the function."
+                        )
+                        assert callable(model_options_tools[fn_name]), (
+                            "When providing a `ModelOption.TOOLS` parameter to `model_options`, always use the type Dict[str, Callable] where `str` is the function name and the callable is the function."
+                        )
+                        # Add the model_options tool to the existing set of tools.
+                        tools[fn_name] = model_options_tools[fn_name]
+        return tools
+
+    def _generate_from_raw(
+        self,
+        actions: list[Component | CBlock],
+        *,
+        format: type[BaseModelSubclass] | None = None,
+        model_options: dict | None = None,
+        generate_logs: list[GenerateLog] | None = None,
+    ) -> list[ModelOutputThunk]:
+        """Generate using the completions api.
+
+        Gives the input provided to the model without templating.
+        """
+        raise NotImplementedError("This method is not implemented yet.")
+        # extra_body = {}
+        # if format is not None:
+        #     FancyLogger.get_logger().warning(
+        #         "The official OpenAI completion api does not accept response format / structured decoding; "
+        #         "it will be passed as an extra arg."
+        #     )
+        #
+        #     # Some versions (like vllm's version) of the OpenAI API support structured decoding for completions requests.
+        #     extra_body["guided_json"] = format.model_json_schema()
+        #
+        # model_opts = self._simplify_and_merge(model_options, is_chat_context=False)
+        #
+        # prompts = [self.formatter.print(action) for action in actions]
+        #
+        # try:
+        #     completion_response: Completion = self._client.completions.create(
+        #         model=self._hf_model_id,
+        #         prompt=prompts,
+        #         extra_body=extra_body,
+        #         **self._make_backend_specific_and_remove(
+        #             model_opts, is_chat_context=False
+        #         ),
+        #     )  # type: ignore
+        # except openai.BadRequestError as e:
+        #     if openai_ollama_batching_error in e.message:
+        #         FancyLogger.get_logger().error(
+        #             "If you are trying to call `OpenAIBackend._generate_from_raw while targeting an ollama server, "
+        #             "your requests will fail since ollama doesn't support batching requests."
+        #         )
+        #     raise e
+        #
+        # # Necessary for type checker.
+        # assert isinstance(completion_response, Completion)
+        #
+        # results = [
+        #     ModelOutputThunk(
+        #         value=response.text,
+        #         meta={"oai_completion_response": response.model_dump()},
+        #     )
+        #     for response in completion_response.choices
+        # ]
+        #
+        # for i, result in enumerate(results):
+        #     self.formatter.parse(actions[i], result)
+        #
+        # if generate_logs is not None:
+        #     assert isinstance(generate_logs, list)
+        #     date = datetime.datetime.now()
+        #
+        #     for i in range(len(prompts)):
+        #         generate_log = GenerateLog()
+        #         generate_log.prompt = prompts[i]
+        #         generate_log.backend = f"openai::{self.model_id!s}"
+        #         generate_log.model_options = model_opts
+        #         generate_log.date = date
+        #         generate_log.model_output = completion_response
+        #         generate_log.extra = {"seed": model_opts.get("seed", None)}
+        #         generate_log.action = actions[i]
+        #         generate_log.result = results[i]
+        #         generate_logs.append(generate_log)
+        #
+        # return results
+
+    def _extract_model_tool_requests(
+        self, tools: dict[str, Callable], chat_response: litellm.ModelResponse
+    ) -> dict[str, ModelToolCall] | None:
+        model_tool_calls: dict[str, ModelToolCall] = {}
+        choice_0 = chat_response.choices[0]
+        assert isinstance(choice_0, litellm.utils.Choices), (
+            "Only works for non-streaming response for now"
+        )
+        calls = choice_0.message.tool_calls
+        if calls:
+            for tool_call in calls:
+                tool_name = str(tool_call.function.name)
+                tool_args = tool_call.function.arguments
+
+                func = tools.get(tool_name)
+                if func is None:
+                    FancyLogger.get_logger().warning(
+                        f"model attempted to call a non-existing function: {tool_name}"
+                    )
+                    continue  # skip this function if we can't find it.
+
+                # Returns the args as a string. Parse it here.
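+                # NOTE: json.loads raises if the model returned malformed JSON for
+                # the arguments; no fallback handling is attempted here.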
+                args = json.loads(tool_args)
+                model_tool_calls[tool_name] = ModelToolCall(tool_name, func, args)
+
+        if len(model_tool_calls) > 0:
+            return model_tool_calls
+        return None
diff --git a/pyproject.toml b/pyproject.toml
index f88033d..610f776 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -49,7 +49,8 @@ dependencies = [
     "mistletoe>=1.4.0",
     "trl==0.19.0",
     "peft",
-    "torch"
+    "torch",
+    "litellm>=1.75.5.post1",
 ]
 
 [project.scripts]
diff --git a/test/backends/test_litellm_ollama.py b/test/backends/test_litellm_ollama.py
new file mode 100644
index 0000000..846abd0
--- /dev/null
+++ b/test/backends/test_litellm_ollama.py
@@ -0,0 +1,42 @@
+import mellea
+from mellea import MelleaSession
+from mellea.backends import ModelOption
+from mellea.backends.litellm import LiteLLMBackend
+from mellea.stdlib.chat import Message
+from mellea.stdlib.sampling import RejectionSamplingStrategy
+
+
+class TestLitellmOllama:
+    m = MelleaSession(LiteLLMBackend())
+
+    def test_litellm_ollama_chat(self):
+        res = self.m.chat("hello world")
+        assert res is not None
+        assert isinstance(res, Message)
+
+    def test_litellm_ollama_instruct(self):
+        res = self.m.instruct(
+            "Write an email to the interns.",
+            requirements=["be funny"],
+            strategy=RejectionSamplingStrategy(loop_budget=3),
+        )
+        assert res is not None
+        assert isinstance(res.value, str)
+
+    def test_litellm_ollama_instruct_options(self):
+        res = self.m.instruct(
+            "Write an email to the interns.",
+            requirements=["be funny"],
+            model_options={
+                ModelOption.SEED: 123,
+                ModelOption.TEMPERATURE: 0.5,
+                ModelOption.THINKING: True,
+                ModelOption.MAX_NEW_TOKENS: 100,
+                "stream": False,
+                "homer_simpson": "option should be kicked out",
+            },
+        )
+        assert res is not None
+        assert isinstance(res.value, str)
+
+
diff --git a/uv.lock b/uv.lock
index e01cdce..d0394c0 100644
--- a/uv.lock
+++ b/uv.lock
@@ -1233,6 +1233,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/ff/62/85c4c919272577931d407be5ba5d71c20f0b616d31a0befe0ae45bb79abd/imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b", size = 8769, upload-time = "2022-07-01T12:21:02.467Z" },
 ]
 
+[[package]]
+name = "importlib-metadata"
+version = "8.7.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "zipp" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" },
+]
+
 [[package]]
 name = "iniconfig"
 version = "2.1.0"
@@ -1816,6 +1828,28 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc", size = 12097, upload-time = "2024-04-05T13:03:10.514Z" },
 ]
 
+[[package]]
+name = "litellm"
+version = "1.75.5.post1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "aiohttp" },
+    { name = "click" },
+    { name = "httpx" },
+    { name = "importlib-metadata" },
+ { name = "jinja2" }, + { name = "jsonschema" }, + { name = "openai" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "tiktoken" }, + { name = "tokenizers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/10/97/6091a020895102a20f1da204ebe68c1293123555476b38e749f95ba5981c/litellm-1.75.5.post1.tar.gz", hash = "sha256:e40a0e4b25032755dc5df7f02742abe9e3b8836236363f605f3bdd363cb5a0d0", size = 10127846, upload-time = "2025-08-10T16:30:23.788Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/76/780f68a3b26227136a5147c76860aacedcae9bf1b7afc1c991ec9cad11bc/litellm-1.75.5.post1-py3-none-any.whl", hash = "sha256:1c72809a9c8f6e132ad06eb7e628f674c775b0ce6bfb58cbd37e8b585d929cb7", size = 8895997, upload-time = "2025-08-10T16:30:21.325Z" }, +] + [[package]] name = "lomond" version = "0.3.3" @@ -2034,6 +2068,7 @@ dependencies = [ { name = "ibm-watsonx-ai" }, { name = "jinja2" }, { name = "json5" }, + { name = "litellm" }, { name = "mistletoe" }, { name = "ollama" }, { name = "openai" }, @@ -2107,6 +2142,7 @@ requires-dist = [ { name = "ibm-watsonx-ai" }, { name = "jinja2" }, { name = "json5" }, + { name = "litellm", specifier = ">=1.75.5.post1" }, { name = "mistletoe", specifier = ">=1.4.0" }, { name = "mypy", marker = "extra == 'dev'" }, { name = "mypy-extensions", marker = "extra == 'dev'" }, @@ -2811,7 +2847,7 @@ wheels = [ [[package]] name = "openai" -version = "1.97.0" +version = "1.99.9" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "anyio" }, @@ -2823,9 +2859,9 @@ dependencies = [ { name = "tqdm" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/e0/c6/b8d66e4f3b95493a8957065b24533333c927dc23817abe397f13fe589c6e/openai-1.97.0.tar.gz", hash = "sha256:0be349569ccaa4fb54f97bb808423fd29ccaeb1246ee1be762e0c81a47bae0aa", size = 493850, upload-time = "2025-07-16T16:37:35.196Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/d2/ef89c6f3f36b13b06e271d3cc984ddd2f62508a0972c1cbcc8485a6644ff/openai-1.99.9.tar.gz", hash = "sha256:f2082d155b1ad22e83247c3de3958eb4255b20ccf4a1de2e6681b6957b554e92", size = 506992, upload-time = "2025-08-12T02:31:10.054Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8a/91/1f1cf577f745e956b276a8b1d3d76fa7a6ee0c2b05db3b001b900f2c71db/openai-1.97.0-py3-none-any.whl", hash = "sha256:a1c24d96f4609f3f7f51c9e1c2606d97cc6e334833438659cfd687e9c972c610", size = 764953, upload-time = "2025-07-16T16:37:33.135Z" }, + { url = "https://files.pythonhosted.org/packages/e8/fb/df274ca10698ee77b07bff952f302ea627cc12dac6b85289485dd77db6de/openai-1.99.9-py3-none-any.whl", hash = "sha256:9dbcdb425553bae1ac5d947147bebbd630d91bbfc7788394d4c4f3a35682ab3a", size = 786816, upload-time = "2025-08-12T02:31:08.34Z" }, ] [[package]] @@ -4888,6 +4924,42 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3a/d8/1ba8f32bfc9cb69e37edeca93738e883f478fbe84ae401f72c0d8d507841/tifffile-2025.6.11-py3-none-any.whl", hash = "sha256:32effb78b10b3a283eb92d4ebf844ae7e93e151458b0412f38518b4e6d2d7542", size = 230800, upload-time = "2025-06-12T04:49:37.458Z" }, ] +[[package]] +name = "tiktoken" +version = "0.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/86/ad0155a37c4f310935d5ac0b1ccf9bdb635dcb906e0a9a26b616dd55825a/tiktoken-0.11.0.tar.gz", hash = "sha256:3c518641aee1c52247c2b97e74d8d07d780092af79d5911a6ab5e79359d9b06a", 
size = 37648, upload-time = "2025-08-08T23:58:08.495Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8b/4d/c6a2e7dca2b4f2e9e0bfd62b3fe4f114322e2c028cfba905a72bc76ce479/tiktoken-0.11.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:8a9b517d6331d7103f8bef29ef93b3cca95fa766e293147fe7bacddf310d5917", size = 1059937, upload-time = "2025-08-08T23:57:28.57Z" }, + { url = "https://files.pythonhosted.org/packages/41/54/3739d35b9f94cb8dc7b0db2edca7192d5571606aa2369a664fa27e811804/tiktoken-0.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b4ddb1849e6bf0afa6cc1c5d809fb980ca240a5fffe585a04e119519758788c0", size = 999230, upload-time = "2025-08-08T23:57:30.241Z" }, + { url = "https://files.pythonhosted.org/packages/dd/f4/ec8d43338d28d53513004ebf4cd83732a135d11011433c58bf045890cc10/tiktoken-0.11.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:10331d08b5ecf7a780b4fe4d0281328b23ab22cdb4ff65e68d56caeda9940ecc", size = 1130076, upload-time = "2025-08-08T23:57:31.706Z" }, + { url = "https://files.pythonhosted.org/packages/94/80/fb0ada0a882cb453caf519a4bf0d117c2a3ee2e852c88775abff5413c176/tiktoken-0.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b062c82300341dc87e0258c69f79bed725f87e753c21887aea90d272816be882", size = 1183942, upload-time = "2025-08-08T23:57:33.142Z" }, + { url = "https://files.pythonhosted.org/packages/2f/e9/6c104355b463601719582823f3ea658bc3aa7c73d1b3b7553ebdc48468ce/tiktoken-0.11.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:195d84bec46169af3b1349a1495c151d37a0ff4cba73fd08282736be7f92cc6c", size = 1244705, upload-time = "2025-08-08T23:57:34.594Z" }, + { url = "https://files.pythonhosted.org/packages/94/75/eaa6068f47e8b3f0aab9e05177cce2cf5aa2cc0ca93981792e620d4d4117/tiktoken-0.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:fe91581b0ecdd8783ce8cb6e3178f2260a3912e8724d2f2d49552b98714641a1", size = 884152, upload-time = "2025-08-08T23:57:36.18Z" }, + { url = "https://files.pythonhosted.org/packages/8a/91/912b459799a025d2842566fe1e902f7f50d54a1ce8a0f236ab36b5bd5846/tiktoken-0.11.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4ae374c46afadad0f501046db3da1b36cd4dfbfa52af23c998773682446097cf", size = 1059743, upload-time = "2025-08-08T23:57:37.516Z" }, + { url = "https://files.pythonhosted.org/packages/8c/e9/6faa6870489ce64f5f75dcf91512bf35af5864583aee8fcb0dcb593121f5/tiktoken-0.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:25a512ff25dc6c85b58f5dd4f3d8c674dc05f96b02d66cdacf628d26a4e4866b", size = 999334, upload-time = "2025-08-08T23:57:38.595Z" }, + { url = "https://files.pythonhosted.org/packages/a1/3e/a05d1547cf7db9dc75d1461cfa7b556a3b48e0516ec29dfc81d984a145f6/tiktoken-0.11.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2130127471e293d385179c1f3f9cd445070c0772be73cdafb7cec9a3684c0458", size = 1129402, upload-time = "2025-08-08T23:57:39.627Z" }, + { url = "https://files.pythonhosted.org/packages/34/9a/db7a86b829e05a01fd4daa492086f708e0a8b53952e1dbc9d380d2b03677/tiktoken-0.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21e43022bf2c33f733ea9b54f6a3f6b4354b909f5a73388fb1b9347ca54a069c", size = 1184046, upload-time = "2025-08-08T23:57:40.689Z" }, + { url = "https://files.pythonhosted.org/packages/9d/bb/52edc8e078cf062ed749248f1454e9e5cfd09979baadb830b3940e522015/tiktoken-0.11.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:adb4e308eb64380dc70fa30493e21c93475eaa11669dea313b6bbf8210bfd013", size = 
1244691, upload-time = "2025-08-08T23:57:42.251Z" }, + { url = "https://files.pythonhosted.org/packages/60/d9/884b6cd7ae2570ecdcaffa02b528522b18fef1cbbfdbcaa73799807d0d3b/tiktoken-0.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:ece6b76bfeeb61a125c44bbefdfccc279b5288e6007fbedc0d32bfec602df2f2", size = 884392, upload-time = "2025-08-08T23:57:43.628Z" }, + { url = "https://files.pythonhosted.org/packages/e7/9e/eceddeffc169fc75fe0fd4f38471309f11cb1906f9b8aa39be4f5817df65/tiktoken-0.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fd9e6b23e860973cf9526544e220b223c60badf5b62e80a33509d6d40e6c8f5d", size = 1055199, upload-time = "2025-08-08T23:57:45.076Z" }, + { url = "https://files.pythonhosted.org/packages/4f/cf/5f02bfefffdc6b54e5094d2897bc80efd43050e5b09b576fd85936ee54bf/tiktoken-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6a76d53cee2da71ee2731c9caa747398762bda19d7f92665e882fef229cb0b5b", size = 996655, upload-time = "2025-08-08T23:57:46.304Z" }, + { url = "https://files.pythonhosted.org/packages/65/8e/c769b45ef379bc360c9978c4f6914c79fd432400a6733a8afc7ed7b0726a/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef72aab3ea240646e642413cb363b73869fed4e604dcfd69eec63dc54d603e8", size = 1128867, upload-time = "2025-08-08T23:57:47.438Z" }, + { url = "https://files.pythonhosted.org/packages/d5/2d/4d77f6feb9292bfdd23d5813e442b3bba883f42d0ac78ef5fdc56873f756/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f929255c705efec7a28bf515e29dc74220b2f07544a8c81b8d69e8efc4578bd", size = 1183308, upload-time = "2025-08-08T23:57:48.566Z" }, + { url = "https://files.pythonhosted.org/packages/7a/65/7ff0a65d3bb0fc5a1fb6cc71b03e0f6e71a68c5eea230d1ff1ba3fd6df49/tiktoken-0.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61f1d15822e4404953d499fd1dcc62817a12ae9fb1e4898033ec8fe3915fdf8e", size = 1244301, upload-time = "2025-08-08T23:57:49.642Z" }, + { url = "https://files.pythonhosted.org/packages/f5/6e/5b71578799b72e5bdcef206a214c3ce860d999d579a3b56e74a6c8989ee2/tiktoken-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:45927a71ab6643dfd3ef57d515a5db3d199137adf551f66453be098502838b0f", size = 884282, upload-time = "2025-08-08T23:57:50.759Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/a9034bcee638716d9310443818d73c6387a6a96db93cbcb0819b77f5b206/tiktoken-0.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a5f3f25ffb152ee7fec78e90a5e5ea5b03b4ea240beed03305615847f7a6ace2", size = 1055339, upload-time = "2025-08-08T23:57:51.802Z" }, + { url = "https://files.pythonhosted.org/packages/f1/91/9922b345f611b4e92581f234e64e9661e1c524875c8eadd513c4b2088472/tiktoken-0.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7dc6e9ad16a2a75b4c4be7208055a1f707c9510541d94d9cc31f7fbdc8db41d8", size = 997080, upload-time = "2025-08-08T23:57:53.442Z" }, + { url = "https://files.pythonhosted.org/packages/d0/9d/49cd047c71336bc4b4af460ac213ec1c457da67712bde59b892e84f1859f/tiktoken-0.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a0517634d67a8a48fd4a4ad73930c3022629a85a217d256a6e9b8b47439d1e4", size = 1128501, upload-time = "2025-08-08T23:57:54.808Z" }, + { url = "https://files.pythonhosted.org/packages/52/d5/a0dcdb40dd2ea357e83cb36258967f0ae96f5dd40c722d6e382ceee6bba9/tiktoken-0.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fb4effe60574675118b73c6fbfd3b5868e5d7a1f570d6cc0d18724b09ecf318", size = 1182743, upload-time = 
"2025-08-08T23:57:56.307Z" }, + { url = "https://files.pythonhosted.org/packages/3b/17/a0fc51aefb66b7b5261ca1314afa83df0106b033f783f9a7bcbe8e741494/tiktoken-0.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94f984c9831fd32688aef4348803b0905d4ae9c432303087bae370dc1381a2b8", size = 1244057, upload-time = "2025-08-08T23:57:57.628Z" }, + { url = "https://files.pythonhosted.org/packages/50/79/bcf350609f3a10f09fe4fc207f132085e497fdd3612f3925ab24d86a0ca0/tiktoken-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:2177ffda31dec4023356a441793fed82f7af5291120751dee4d696414f54db0c", size = 883901, upload-time = "2025-08-08T23:57:59.359Z" }, +] + [[package]] name = "tinycss2" version = "1.4.0" @@ -5520,3 +5592,12 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" }, { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, ] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +]